1 Star 0 Fork 13

DongDu/opensbi

forked from src-openEuler/opensbi 
加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
0001-Penglai-supports.patch 95.52 KB
一键复制 编辑 原始数据 按行查看 历史
DongDu 提交于 2021-07-07 10:32 . Add Penglai supports for openEuler
From 7a05ac220aa0f9e8afd7389be5ac0160ff6b1511 Mon Sep 17 00:00:00 2001
From: Dong Du <dd_nirvana@sjtu.edu.cn>
Date: Wed, 7 Jul 2021 10:08:53 +0800
Subject: [PATCH] Penglai supports
Signed-off-by: Dong Du <dd_nirvana@sjtu.edu.cn>
---
include/sbi/riscv_encoding.h | 19 +
include/sbi/sbi_ecall.h | 2 +
include/sbi/sbi_ecall_interface.h | 4 +
include/sbi/sbi_pmp.h | 10 +
include/sm/enclave.h | 98 +++
include/sm/enclave_args.h | 31 +
include/sm/math.h | 78 +++
include/sm/platform/pmp/enclave_mm.h | 67 ++
include/sm/platform/pmp/platform.h | 9 +
include/sm/platform/pmp/platform_thread.h | 13 +
include/sm/pmp.h | 71 ++
include/sm/print.h | 15 +
include/sm/sm.h | 80 +++
include/sm/thread.h | 86 +++
include/sm/utils.h | 10 +
include/sm/vm.h | 21 +
lib/sbi/objects.mk | 11 +
lib/sbi/sbi_ecall.c | 12 +
lib/sbi/sbi_ecall_penglai.c | 98 +++
lib/sbi/sbi_hart.c | 4 +
lib/sbi/sbi_init.c | 30 +-
lib/sbi/sbi_pmp.c | 123 ++++
lib/sbi/sbi_trap.c | 14 +
lib/sbi/sm/.gitignore | 1 +
lib/sbi/sm/enclave.c | 796 ++++++++++++++++++++++
lib/sbi/sm/platform/README.md | 9 +
lib/sbi/sm/platform/pmp/enclave_mm.c | 705 +++++++++++++++++++
lib/sbi/sm/platform/pmp/platform.c | 34 +
lib/sbi/sm/platform/pmp/platform_thread.c | 31 +
lib/sbi/sm/pmp.c | 287 ++++++++
lib/sbi/sm/sm.ac | 3 +
lib/sbi/sm/sm.c | 204 ++++++
lib/sbi/sm/sm.mk.in | 25 +
lib/sbi/sm/thread.c | 67 ++
lib/sbi/sm/utils.c | 40 ++
35 files changed, 3106 insertions(+), 2 deletions(-)
create mode 100644 include/sbi/sbi_pmp.h
create mode 100644 include/sm/enclave.h
create mode 100644 include/sm/enclave_args.h
create mode 100644 include/sm/math.h
create mode 100644 include/sm/platform/pmp/enclave_mm.h
create mode 100644 include/sm/platform/pmp/platform.h
create mode 100644 include/sm/platform/pmp/platform_thread.h
create mode 100644 include/sm/pmp.h
create mode 100644 include/sm/print.h
create mode 100644 include/sm/sm.h
create mode 100644 include/sm/thread.h
create mode 100644 include/sm/utils.h
create mode 100644 include/sm/vm.h
create mode 100644 lib/sbi/sbi_ecall_penglai.c
create mode 100644 lib/sbi/sbi_pmp.c
create mode 100644 lib/sbi/sm/.gitignore
create mode 100644 lib/sbi/sm/enclave.c
create mode 100644 lib/sbi/sm/platform/README.md
create mode 100644 lib/sbi/sm/platform/pmp/enclave_mm.c
create mode 100644 lib/sbi/sm/platform/pmp/platform.c
create mode 100644 lib/sbi/sm/platform/pmp/platform_thread.c
create mode 100644 lib/sbi/sm/pmp.c
create mode 100644 lib/sbi/sm/sm.ac
create mode 100644 lib/sbi/sm/sm.c
create mode 100644 lib/sbi/sm/sm.mk.in
create mode 100644 lib/sbi/sm/thread.c
create mode 100644 lib/sbi/sm/utils.c
diff --git a/include/sbi/riscv_encoding.h b/include/sbi/riscv_encoding.h
index e1d0b46..a1cebd7 100644
--- a/include/sbi/riscv_encoding.h
+++ b/include/sbi/riscv_encoding.h
@@ -151,6 +151,22 @@
#define PMP_ADDR_MASK _UL(0xFFFFFFFF)
#endif
+/* page table entry (PTE) fields */
+#define PTE_V _UL(0x001) /* Valid */
+#define PTE_R _UL(0x002) /* Read */
+#define PTE_W _UL(0x004) /* Write */
+#define PTE_X _UL(0x008) /* Execute */
+#define PTE_U _UL(0x010) /* User */
+#define PTE_G _UL(0x020) /* Global */
+#define PTE_A _UL(0x040) /* Accessed */
+#define PTE_D _UL(0x080) /* Dirty */
+#define PTE_SOFT _UL(0x300) /* Reserved for Software */
+
+#define PTE_PPN_SHIFT 10
+
+#define PTE_TABLE(PTE) \
+ (((PTE) & (PTE_V | PTE_R | PTE_W | PTE_X)) == PTE_V)
+
#if __riscv_xlen == 64
#define MSTATUS_SD MSTATUS64_SD
#define SSTATUS_SD SSTATUS64_SD
@@ -171,6 +187,9 @@
#define HGATP_MODE_SHIFT HGATP32_MODE_SHIFT
#endif
+#define RISCV_PGSHIFT 12
+#define RISCV_PGSIZE (1 << RISCV_PGSHIFT)
+
/* ===== User-level CSRs ===== */
/* User Trap Setup (N-extension) */
diff --git a/include/sbi/sbi_ecall.h b/include/sbi/sbi_ecall.h
index d357085..b77d252 100644
--- a/include/sbi/sbi_ecall.h
+++ b/include/sbi/sbi_ecall.h
@@ -39,6 +39,8 @@ extern struct sbi_ecall_extension ecall_ipi;
extern struct sbi_ecall_extension ecall_vendor;
extern struct sbi_ecall_extension ecall_hsm;
extern struct sbi_ecall_extension ecall_srst;
+extern struct sbi_ecall_extension ecall_penglai_host;
+extern struct sbi_ecall_extension ecall_penglai_enclave;
u16 sbi_ecall_version_major(void);
diff --git a/include/sbi/sbi_ecall_interface.h b/include/sbi/sbi_ecall_interface.h
index 002c6f9..0bec030 100644
--- a/include/sbi/sbi_ecall_interface.h
+++ b/include/sbi/sbi_ecall_interface.h
@@ -29,6 +29,10 @@
#define SBI_EXT_HSM 0x48534D
#define SBI_EXT_SRST 0x53525354
+//Penglai
+#define SBI_EXT_PENGLAI_HOST 0x100100
+#define SBI_EXT_PENGLAI_ENCLAVE 0x100101
+
/* SBI function IDs for BASE extension*/
#define SBI_EXT_BASE_GET_SPEC_VERSION 0x0
#define SBI_EXT_BASE_GET_IMP_ID 0x1
diff --git a/include/sbi/sbi_pmp.h b/include/sbi/sbi_pmp.h
new file mode 100644
index 0000000..c6ef1fc
--- /dev/null
+++ b/include/sbi/sbi_pmp.h
@@ -0,0 +1,10 @@
+#ifndef __SBI_PMP_H__
+#define __SBI_PMP_H__
+
+#include <sm/pmp.h>
+#include <sbi/sbi_types.h>
+#include <sbi/sbi_hartmask.h>
+struct sbi_scratch;
+int sbi_pmp_init(struct sbi_scratch *scratch, bool cold_boot);
+int sbi_send_pmp(ulong hmask, ulong hbase, struct pmp_data_t* pmp_data);
+#endif
diff --git a/include/sm/enclave.h b/include/sm/enclave.h
new file mode 100644
index 0000000..377ca2e
--- /dev/null
+++ b/include/sm/enclave.h
@@ -0,0 +1,98 @@
+#ifndef _ENCLAVE_H
+#define _ENCLAVE_H
+
+#include <sbi/riscv_asm.h>
+#include <sm/vm.h>
+#include <sbi/riscv_encoding.h>
+#include <sm/enclave_args.h>
+#include <sbi/riscv_atomic.h>
+#include <sm/thread.h>
+#include <stdint.h>
+#include <stddef.h>
+
+#define ENCLAVES_PER_METADATA_REGION 128
+#define ENCLAVE_METADATA_REGION_SIZE ((sizeof(struct enclave_t)) * ENCLAVES_PER_METADATA_REGION)
+
+#define ENCLAVE_MODE 1
+
+// define the time slice for an enclave
+#define ENCLAVE_TIME_CREDITS 100000
+
+struct link_mem_t
+{
+ unsigned long mem_size;
+ unsigned long slab_size;
+ unsigned long slab_num;
+ char* addr;
+ struct link_mem_t* next_link_mem;
+};
+
+typedef enum
+{
+ DESTROYED = -1,
+ INVALID = 0,
+ FRESH = 1,
+ RUNNABLE,
+ RUNNING,
+ STOPPED,
+} enclave_state_t;
+
+/*
+ * enclave memory [paddr, paddr + size]
+ * free_mem @ unused memory address in enclave mem
+ */
+struct enclave_t
+{
+ unsigned int eid;
+ enclave_state_t state;
+
+ //memory region of enclave
+ unsigned long paddr;
+ unsigned long size;
+
+ //address of left available memory in memory region
+ unsigned long free_mem;
+
+ //TODO: dynamically allocated memory
+ unsigned long* enclave_mem_metadata_page;
+
+ //root page table of enclave
+ unsigned long* root_page_table;
+ //root page table register for host
+ unsigned long host_ptbr;
+ //entry point of enclave
+ unsigned long entry_point;
+
+ unsigned long* ocall_func_id;
+ unsigned long* ocall_arg0;
+ unsigned long* ocall_arg1;
+ unsigned long* ocall_syscall_num;
+
+ //shared memory with host
+ unsigned long untrusted_ptr;
+ unsigned long untrusted_size;
+
+ //enclave thread context
+ //TODO: support multiple threads
+ struct thread_state_t thread_context;
+};
+
+struct cpu_state_t
+{
+ int in_enclave;
+ int eid;
+};
+
+uintptr_t copy_from_host(void* dest, void* src, size_t size);
+uintptr_t copy_to_host(void* dest, void* src, size_t size);
+
+uintptr_t create_enclave(struct enclave_sbi_param_t create_args);
+uintptr_t run_enclave(uintptr_t* regs, unsigned int eid);
+uintptr_t stop_enclave(uintptr_t* regs, unsigned int eid);
+uintptr_t destroy_enclave(uintptr_t* regs, unsigned int eid);
+uintptr_t resume_enclave(uintptr_t* regs, unsigned int eid);
+uintptr_t resume_from_stop(uintptr_t* regs, unsigned int eid);
+uintptr_t exit_enclave(uintptr_t* regs, unsigned long retval);
+uintptr_t do_timer_irq(uintptr_t* regs, uintptr_t mcause, uintptr_t mepc);
+
+#endif /* _ENCLAVE_H */
diff --git a/include/sm/enclave_args.h b/include/sm/enclave_args.h
new file mode 100644
index 0000000..6516f70
--- /dev/null
+++ b/include/sm/enclave_args.h
@@ -0,0 +1,31 @@
+#ifndef _ENCLAVE_ARGS_H
+#define _ENCLAVE_ARGS_H
+#include "thread.h"
+
+struct mm_alloc_arg_t
+{
+ unsigned long req_size;
+ uintptr_t resp_addr;
+ unsigned long resp_size;
+};
+
+/*
+ * enclave memory [paddr, paddr + size]
+ * free_mem @ unused memory address in enclave mem
+ */
+struct enclave_sbi_param_t
+{
+ unsigned int *eid_ptr;
+ unsigned long paddr;
+ unsigned long size;
+ unsigned long entry_point;
+ unsigned long untrusted_ptr;
+ unsigned long untrusted_size;
+ unsigned long free_mem;
+ unsigned long *ecall_arg0;
+ unsigned long *ecall_arg1;
+ unsigned long *ecall_arg2;
+ unsigned long *ecall_arg3;
+};
+
+#endif /* _ENCLAVE_ARGS_H */
diff --git a/include/sm/math.h b/include/sm/math.h
new file mode 100644
index 0000000..7a665b2
--- /dev/null
+++ b/include/sm/math.h
@@ -0,0 +1,78 @@
+#ifndef _MATH_H
+#define _MATH_H
+
+#define ilog2(n) \
+( \
+ (n) < 2 ? 0 : \
+ (n) & (1ULL << 63) ? 63 : \
+ (n) & (1ULL << 62) ? 62 : \
+ (n) & (1ULL << 61) ? 61 : \
+ (n) & (1ULL << 60) ? 60 : \
+ (n) & (1ULL << 59) ? 59 : \
+ (n) & (1ULL << 58) ? 58 : \
+ (n) & (1ULL << 57) ? 57 : \
+ (n) & (1ULL << 56) ? 56 : \
+ (n) & (1ULL << 55) ? 55 : \
+ (n) & (1ULL << 54) ? 54 : \
+ (n) & (1ULL << 53) ? 53 : \
+ (n) & (1ULL << 52) ? 52 : \
+ (n) & (1ULL << 51) ? 51 : \
+ (n) & (1ULL << 50) ? 50 : \
+ (n) & (1ULL << 49) ? 49 : \
+ (n) & (1ULL << 48) ? 48 : \
+ (n) & (1ULL << 47) ? 47 : \
+ (n) & (1ULL << 46) ? 46 : \
+ (n) & (1ULL << 45) ? 45 : \
+ (n) & (1ULL << 44) ? 44 : \
+ (n) & (1ULL << 43) ? 43 : \
+ (n) & (1ULL << 42) ? 42 : \
+ (n) & (1ULL << 41) ? 41 : \
+ (n) & (1ULL << 40) ? 40 : \
+ (n) & (1ULL << 39) ? 39 : \
+ (n) & (1ULL << 38) ? 38 : \
+ (n) & (1ULL << 37) ? 37 : \
+ (n) & (1ULL << 36) ? 36 : \
+ (n) & (1ULL << 35) ? 35 : \
+ (n) & (1ULL << 34) ? 34 : \
+ (n) & (1ULL << 33) ? 33 : \
+ (n) & (1ULL << 32) ? 32 : \
+ (n) & (1ULL << 31) ? 31 : \
+ (n) & (1ULL << 30) ? 30 : \
+ (n) & (1ULL << 29) ? 29 : \
+ (n) & (1ULL << 28) ? 28 : \
+ (n) & (1ULL << 27) ? 27 : \
+ (n) & (1ULL << 26) ? 26 : \
+ (n) & (1ULL << 25) ? 25 : \
+ (n) & (1ULL << 24) ? 24 : \
+ (n) & (1ULL << 23) ? 23 : \
+ (n) & (1ULL << 22) ? 22 : \
+ (n) & (1ULL << 21) ? 21 : \
+ (n) & (1ULL << 20) ? 20 : \
+ (n) & (1ULL << 19) ? 19 : \
+ (n) & (1ULL << 18) ? 18 : \
+ (n) & (1ULL << 17) ? 17 : \
+ (n) & (1ULL << 16) ? 16 : \
+ (n) & (1ULL << 15) ? 15 : \
+ (n) & (1ULL << 14) ? 14 : \
+ (n) & (1ULL << 13) ? 13 : \
+ (n) & (1ULL << 12) ? 12 : \
+ (n) & (1ULL << 11) ? 11 : \
+ (n) & (1ULL << 10) ? 10 : \
+ (n) & (1ULL << 9) ? 9 : \
+ (n) & (1ULL << 8) ? 8 : \
+ (n) & (1ULL << 7) ? 7 : \
+ (n) & (1ULL << 6) ? 6 : \
+ (n) & (1ULL << 5) ? 5 : \
+ (n) & (1ULL << 4) ? 4 : \
+ (n) & (1ULL << 3) ? 3 : \
+ (n) & (1ULL << 2) ? 2 : \
+ 1 \
+)
+
+#define power_2_align(n) (1 << (ilog2(n-1)+1))
+
+#define size_down_align(n, size) (n - ((n) % (size)))
+
+#define size_up_align(n, size) (size_down_align(n, size) + ((n) % (size) ? (size) : 0))
+
+#endif /* _MATH_H */
diff --git a/include/sm/platform/pmp/enclave_mm.h b/include/sm/platform/pmp/enclave_mm.h
new file mode 100644
index 0000000..dcab9b4
--- /dev/null
+++ b/include/sm/platform/pmp/enclave_mm.h
@@ -0,0 +1,67 @@
+#ifndef _ENCLAVE_MM_H
+#define _ENCLAVE_MM_H
+
+#include <stdint.h>
+#include <sm/pmp.h>
+#include <sm/enclave.h>
+
+#define N_PMP_REGIONS (NPMP - 3)
+
+#define REGION_TO_PMP(region_idx) (region_idx + 2) //from the 3rd to the N-1 regions
+#define PMP_TO_REGION(pmp_idx) (pmp_idx - 2)
+
+/*
+ * Layout of free memory chunk
+ * | struct mm_list_head_t | struct mm_list_t | 00...0 |
+ * | struct mm_list_head_t | struct mm_list_t | 00...0 |
+ * | struct mm_list_head_t | struct mm_list_t | 00...0 |
+ */
+struct mm_list_t
+{
+ int order;
+ struct mm_list_t *prev_mm;
+ struct mm_list_t *next_mm;
+};
+
+struct mm_list_head_t
+{
+ int order;
+ struct mm_list_head_t *prev_list_head;
+ struct mm_list_head_t *next_list_head;
+ struct mm_list_t *mm_list;
+};
+
+#define MM_LIST_2_PADDR(mm_list) ((void*)(mm_list) - sizeof(struct mm_list_head_t))
+#define PADDR_2_MM_LIST(paddr) ((void*)(paddr) + sizeof(struct mm_list_head_t))
+
+struct mm_region_t
+{
+ int valid;
+ uintptr_t paddr;
+ unsigned long size;
+ struct mm_list_head_t *mm_list_head;
+};
+
+#define region_overlap(pa0, size0, pa1, size1) (((pa0<=pa1) && ((pa0+size0)>pa1)) \
+ || ((pa1<=pa0) && ((pa1+size1)>pa0)))
+
+#define region_contain(pa0, size0, pa1, size1) (((unsigned long)(pa0) <= (unsigned long)(pa1)) \
+ && (((unsigned long)(pa0) + (unsigned long)(size0)) >= ((unsigned long)(pa1) + (unsigned long)(size1))))
+
+int grant_kernel_access(void* paddr, unsigned long size);
+
+int grant_enclave_access(struct enclave_t* enclave);
+
+int retrieve_kernel_access(void* paddr, unsigned long size);
+
+int retrieve_enclave_access(struct enclave_t *enclave);
+
+uintptr_t mm_init(uintptr_t paddr, unsigned long size);
+
+void* mm_alloc(unsigned long req_size, unsigned long* resp_size);
+
+int mm_free(void* paddr, unsigned long size);
+
+void print_buddy_system();
+
+#endif /* _ENCLAVE_MM_H */
diff --git a/include/sm/platform/pmp/platform.h b/include/sm/platform/pmp/platform.h
new file mode 100644
index 0000000..cb891e2
--- /dev/null
+++ b/include/sm/platform/pmp/platform.h
@@ -0,0 +1,9 @@
+#ifndef _PLATFORM_H
+#define _PLATFORM_H
+
+#include "enclave_mm.h"
+#include "platform_thread.h"
+
+int platform_init();
+
+#endif /* _PLATFORM_H */
diff --git a/include/sm/platform/pmp/platform_thread.h b/include/sm/platform/pmp/platform_thread.h
new file mode 100644
index 0000000..36a7e72
--- /dev/null
+++ b/include/sm/platform/pmp/platform_thread.h
@@ -0,0 +1,13 @@
+#ifndef _PLATFORM_THREAD_H
+#define _PLATFORM_THREAD_H
+
+#include <sm/thread.h>
+
+void platform_enter_enclave_world();
+void platform_exit_enclave_world();
+int platform_check_in_enclave_world();
+int platform_check_enclave_authentication();
+void platform_switch_to_enclave_ptbr(struct thread_state_t* thread, uintptr_t ptbr);
+void platform_switch_to_host_ptbr(struct thread_state_t* thread, uintptr_t ptbr);
+
+#endif /* _PLATFORM_THREAD_H */
diff --git a/include/sm/pmp.h b/include/sm/pmp.h
new file mode 100644
index 0000000..a88371f
--- /dev/null
+++ b/include/sm/pmp.h
@@ -0,0 +1,71 @@
+#ifndef _PMP_H
+#define _PMP_H
+
+#include <stdint.h>
+#include <sbi/sbi_types.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/riscv_asm.h>
+#include <sbi/sbi_hartmask.h>
+
+//number of PMP registers
+#define NPMP 16
+
+#define PMP_OFF 0x00
+#define PMP_NO_PERM 0
+
+//pmpcfg register's structure
+//|63 56|55 48|47 40|39 32|31 24|23 16|15 8|7 0|
+//| pmp7cfg | pmp6cfg | pmp5cfg | pmp4cfg | pmp3cfg | pmp2cfg | pmp1cfg | pmp0cfg |
+#define PMP_PER_CFG_REG 8
+#define PMPCFG_BIT_NUM 8
+#define PMPCFG_BITS 0xFF
+
+#define PMP_SET(num, cfg_index, pmpaddr, pmpcfg) do { \
+ uintptr_t oldcfg = csr_read(CSR_PMPCFG##cfg_index); \
+ pmpcfg |= (oldcfg & ~((uintptr_t)PMPCFG_BITS << (uintptr_t)PMPCFG_BIT_NUM*(num%PMP_PER_CFG_REG))); \
+ asm volatile ("la t0, 1f\n\t" \
+ "csrrw t0, mtvec, t0\n\t" \
+ "csrw pmpaddr"#num", %0\n\t" \
+ "csrw pmpcfg"#cfg_index", %1\n\t" \
+ "sfence.vma\n\t"\
+ ".align 2\n\t" \
+ "1: csrw mtvec, t0 \n\t" \
+ : : "r" (pmpaddr), "r" (pmpcfg) : "t0"); \
+} while(0)
+
+#define PMP_READ(num, cfg_index, pmpaddr, pmpcfg) do { \
+ asm volatile("csrr %0, pmpaddr"#num : "=r"(pmpaddr) :); \
+ asm volatile("csrr %0, pmpcfg"#cfg_index : "=r"(pmpcfg) :); \
+} while(0)
+
+struct pmp_config_t
+{
+ uintptr_t paddr;
+ unsigned long size;
+ uintptr_t perm;
+ uintptr_t mode;
+};
+
+struct pmp_data_t
+{
+ struct pmp_config_t pmp_config_arg;
+ int pmp_idx_arg;
+ struct sbi_hartmask smask;
+};
+
+#define SBI_PMP_DATA_INIT(__ptr, __pmp_config_arg, __pmp_idx_arg, __src) \
+do { \
+ (__ptr)->pmp_config_arg = (__pmp_config_arg); \
+ (__ptr)->pmp_idx_arg = (__pmp_idx_arg); \
+ SBI_HARTMASK_INIT_EXCEPT(&(__ptr)->smask, (__src)); \
+} while (0)
+
+
+void set_pmp_and_sync(int pmp_idx, struct pmp_config_t);
+void clear_pmp_and_sync(int pmp_idx);
+void set_pmp(int pmp_idx, struct pmp_config_t);
+void clear_pmp(int pmp_idx);
+struct pmp_config_t get_pmp(int pmp_idx);
+void dump_pmps(void);
+
+#endif /* _PMP_H */
diff --git a/include/sm/print.h b/include/sm/print.h
new file mode 100644
index 0000000..29118cd
--- /dev/null
+++ b/include/sm/print.h
@@ -0,0 +1,15 @@
+#ifndef SM_PRINT_H
+#define SM_PRINT_H
+
+#include <sbi/sbi_console.h>
+
+#ifdef PENGLAI_DEBUG
+#define printm(...) sbi_printf(__VA_ARGS__)
+#else
+#define printm(...)
+#endif
+
+//For reporting error messages; always enabled
+#define printm_err(...) sbi_printf(__VA_ARGS__)
+
+#endif
diff --git a/include/sm/sm.h b/include/sm/sm.h
new file mode 100644
index 0000000..db0b49e
--- /dev/null
+++ b/include/sm/sm.h
@@ -0,0 +1,80 @@
+#ifndef _SM_H
+#define _SM_H
+
+//#ifndef TARGET_PLATFORM_HEADER
+//#error "SM requires to specify a certain platform"
+//#endif
+
+//#include TARGET_PLATFORM_HEADER
+#include <sm/print.h>
+#include <sm/platform/pmp/platform.h>
+#include <stdint.h>
+#include <sm/enclave_args.h>
+
+/*
+ * Note: the hard-coded SM base and size depends on the M-mode firmware,
+ * e.g., in OpenSBI, you should check the firmware range in platform/generic/config.mk
+ * */
+#define SM_BASE 0x80000000
+#define SM_SIZE 0x200000
+
+#define MAX_HARTS 8
+
+//Host SBI numbers
+#define SBI_MM_INIT 100
+#define SBI_CREATE_ENCLAVE 99
+#define SBI_ATTEST_ENCLAVE 98
+#define SBI_RUN_ENCLAVE 97
+#define SBI_STOP_ENCLAVE 96
+#define SBI_RESUME_ENCLAVE 95
+#define SBI_DESTROY_ENCLAVE 94
+#define SBI_ALLOC_ENCLAVE_MM 93
+#define SBI_MEMORY_EXTEND 92
+#define SBI_MEMORY_RECLAIM 91
+#define SBI_ENCLAVE_OCALL 90
+#define SBI_DEBUG_PRINT 88
+
+//Enclave SBI numbers
+#define SBI_EXIT_ENCLAVE 99
+
+//Error code of SBI_ALLOC_ENCLAVE_MM
+#define ENCLAVE_NO_MEMORY -2
+#define ENCLAVE_ERROR -1
+#define ENCLAVE_SUCCESS 0
+#define ENCLAVE_TIMER_IRQ 1
+
+//Error code of SBI_RESUME_ENCLAVE
+#define RESUME_FROM_TIMER_IRQ 2000
+#define RESUME_FROM_STOP 2003
+
+void sm_init();
+
+uintptr_t sm_mm_init(uintptr_t paddr, unsigned long size);
+
+uintptr_t sm_mm_extend(uintptr_t paddr, unsigned long size);
+
+uintptr_t sm_alloc_enclave_mem(uintptr_t mm_alloc_arg);
+
+uintptr_t sm_create_enclave(uintptr_t enclave_create_args);
+
+uintptr_t sm_attest_enclave(uintptr_t enclave_id, uintptr_t report, uintptr_t nonce);
+
+uintptr_t sm_run_enclave(uintptr_t *regs, uintptr_t enclave_id);
+
+uintptr_t sm_debug_print(uintptr_t *regs, uintptr_t enclave_id);
+
+uintptr_t sm_stop_enclave(uintptr_t *regs, uintptr_t enclave_id);
+
+uintptr_t sm_resume_enclave(uintptr_t *regs, uintptr_t enclave_id);
+
+uintptr_t sm_destroy_enclave(uintptr_t *regs, uintptr_t enclave_id);
+
+uintptr_t sm_enclave_ocall(uintptr_t *regs, uintptr_t ocall_func_id, uintptr_t arg);
+
+uintptr_t sm_exit_enclave(uintptr_t *regs, unsigned long retval);
+
+uintptr_t sm_do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc);
+
+int check_in_enclave_world();
+
+#endif /* _SM_H */
diff --git a/include/sm/thread.h b/include/sm/thread.h
new file mode 100644
index 0000000..1d3db91
--- /dev/null
+++ b/include/sm/thread.h
@@ -0,0 +1,86 @@
+#ifndef __THREAD_H__
+#define __THREAD_H__
+
+#include <stdint.h>
+
+//default layout of enclave
+//#####################
+//# reserved for #
+//# s mode #
+//##################### 0xffffffe000000000
+//# hole #
+//##################### 0x0000004000000000
+//# stack #
+//# #
+//# heap #
+//##################### 0x0000002000000000
+//# untrusted memory #
+//# shared with host #
+//##################### 0x0000001000000000
+//# code & data #
+//##################### 0x0000000000001000
+//# hole #
+//##################### 0x0
+
+#define ENCLAVE_DEFAULT_STACK 0x0000004000000000;
+
+#define N_GENERAL_REGISTERS 32
+
+struct general_registers_t
+{
+ uintptr_t slot;
+ uintptr_t ra;
+ uintptr_t sp;
+ uintptr_t gp;
+ uintptr_t tp;
+ uintptr_t t0;
+ uintptr_t t1;
+ uintptr_t t2;
+ uintptr_t s0;
+ uintptr_t s1;
+ uintptr_t a0;
+ uintptr_t a1;
+ uintptr_t a2;
+ uintptr_t a3;
+ uintptr_t a4;
+ uintptr_t a5;
+ uintptr_t a6;
+ uintptr_t a7;
+ uintptr_t s2;
+ uintptr_t s3;
+ uintptr_t s4;
+ uintptr_t s5;
+ uintptr_t s6;
+ uintptr_t s7;
+ uintptr_t s8;
+ uintptr_t s9;
+ uintptr_t s10;
+ uintptr_t s11;
+ uintptr_t t3;
+ uintptr_t t4;
+ uintptr_t t5;
+ uintptr_t t6;
+};
+
+/* enclave thread state */
+struct thread_state_t
+{
+ uintptr_t encl_ptbr;
+ uintptr_t prev_stvec;
+ uintptr_t prev_mie;
+ uintptr_t prev_mideleg;
+ uintptr_t prev_medeleg;
+ uintptr_t prev_mepc;
+ uintptr_t prev_cache_binding;
+ struct general_registers_t prev_state;
+};
+
+/* swap previous and current thread states */
+void swap_prev_state(struct thread_state_t* state, uintptr_t* regs);
+void swap_prev_mepc(struct thread_state_t* state, uintptr_t mepc);
+void swap_prev_stvec(struct thread_state_t* state, uintptr_t stvec);
+void swap_prev_cache_binding(struct thread_state_t* state, uintptr_t cache_binding);
+void swap_prev_mie(struct thread_state_t* state, uintptr_t mie);
+void swap_prev_mideleg(struct thread_state_t* state, uintptr_t mideleg);
+void swap_prev_medeleg(struct thread_state_t* state, uintptr_t medeleg);
+#endif /* thread */
diff --git a/include/sm/utils.h b/include/sm/utils.h
new file mode 100644
index 0000000..e3f2fab
--- /dev/null
+++ b/include/sm/utils.h
@@ -0,0 +1,10 @@
+// See LICENSE for license details.
+
+#ifndef _RISCV_SM_UTILS_H
+#define _RISCV_SM_UTILS_H
+
+#include <sbi/riscv_encoding.h>
+
+void dump_pt(unsigned long *page_table, int level);
+
+#endif
diff --git a/include/sm/vm.h b/include/sm/vm.h
new file mode 100644
index 0000000..2cce276
--- /dev/null
+++ b/include/sm/vm.h
@@ -0,0 +1,21 @@
+#ifndef _VM_H
+#define _VM_H
+
+#include <sbi/riscv_encoding.h>
+#include <stdint.h>
+
+#define MEGAPAGE_SIZE ((uintptr_t)(RISCV_PGSIZE << RISCV_PGLEVEL_BITS))
+
+#if __riscv_xlen == 64
+
+# define SATP_MODE_CHOICE INSERT_FIELD(0, SATP64_MODE, SATP_MODE_SV39)
+# define VA_BITS 39
+# define GIGAPAGE_SIZE (MEGAPAGE_SIZE << RISCV_PGLEVEL_BITS)
+
+#else
+
+# define SATP_MODE_CHOICE INSERT_FIELD(0, SATP32_MODE, SATP_MODE_SV32)
+# define VA_BITS 32
+#endif
+
+#endif
diff --git a/lib/sbi/objects.mk b/lib/sbi/objects.mk
index 6f2c06f..605d39f 100644
--- a/lib/sbi/objects.mk
+++ b/lib/sbi/objects.mk
@@ -41,3 +41,14 @@ libsbi-objs-y += sbi_tlb.o
libsbi-objs-y += sbi_trap.o
libsbi-objs-y += sbi_unpriv.o
libsbi-objs-y += sbi_expected_trap.o
+libsbi-objs-y += sbi_pmp.o
+
+## Added by Dong Du
+# The Penglai related files here
+libsbi-objs-y += sbi_ecall_penglai.o
+libsbi-objs-y += sm/enclave.o
+libsbi-objs-y += sm/pmp.o
+libsbi-objs-y += sm/sm.o
+libsbi-objs-y += sm/thread.o
+libsbi-objs-y += sm/utils.o
+libsbi-objs-y += sm/platform/pmp/platform.o
diff --git a/lib/sbi/sbi_ecall.c b/lib/sbi/sbi_ecall.c
index e92a539..22cb677 100644
--- a/lib/sbi/sbi_ecall.c
+++ b/lib/sbi/sbi_ecall.c
@@ -116,6 +116,12 @@ int sbi_ecall_handler(struct sbi_trap_regs *regs)
if (ret == SBI_ETRAP) {
trap.epc = regs->mepc;
sbi_trap_redirect(regs, &trap);
+ } else if (extension_id == SBI_EXT_PENGLAI_HOST ||
+ extension_id == SBI_EXT_PENGLAI_ENCLAVE) {
+ regs->a0 = ret;
+ if (!is_0_1_spec)
+ regs->a1 = out_val;
+
} else {
if (ret < SBI_LAST_ERR) {
sbi_printf("%s: Invalid error %d for ext=0x%lx "
@@ -168,6 +174,12 @@ int sbi_ecall_init(void)
if (ret)
return ret;
ret = sbi_ecall_register_extension(&ecall_vendor);
+ if (ret)
+ return ret;
+ ret = sbi_ecall_register_extension(&ecall_penglai_host);
+ if (ret)
+ return ret;
+ ret = sbi_ecall_register_extension(&ecall_penglai_enclave);
if (ret)
return ret;
diff --git a/lib/sbi/sbi_ecall_penglai.c b/lib/sbi/sbi_ecall_penglai.c
new file mode 100644
index 0000000..b6a1395
--- /dev/null
+++ b/lib/sbi/sbi_ecall_penglai.c
@@ -0,0 +1,98 @@
+/*
+ * Authors:
+ * Dong Du <Dd_nirvana@sjtu.edu.cn>
+ * Erhu Feng <2748250768@qq.com>
+ */
+
+#include <sbi/sbi_ecall.h>
+#include <sbi/sbi_ecall_interface.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_trap.h>
+#include <sbi/sbi_version.h>
+#include <sbi/riscv_asm.h>
+#include <sbi/sbi_console.h>
+#include <sm/sm.h>
+
+
+static int sbi_ecall_penglai_host_handler(unsigned long extid, unsigned long funcid,
+ const struct sbi_trap_regs *regs, unsigned long *out_val,
+ struct sbi_trap_info *out_trap)
+{
+ uintptr_t ret = 0;
+
+ //csr_write(CSR_MEPC, regs->mepc + 4);
+ ((struct sbi_trap_regs *)regs)->mepc += 4;
+
+ switch (funcid) {
+ // The following is the Penglai's Handler
+ case SBI_MM_INIT:
+ ret = sm_mm_init(regs->a0, regs->a1);
+ break;
+ case SBI_MEMORY_EXTEND:
+ ret = sm_mm_extend(regs->a0, regs->a1);
+ break;
+ case SBI_ALLOC_ENCLAVE_MM:
+ ret = sm_alloc_enclave_mem(regs->a0);
+ break;
+ case SBI_CREATE_ENCLAVE:
+ ret = sm_create_enclave(regs->a0);
+ break;
+ case SBI_RUN_ENCLAVE:
+ ret = sm_run_enclave((uintptr_t *)regs, regs->a0);
+ break;
+ case SBI_STOP_ENCLAVE:
+ ret = sm_stop_enclave((uintptr_t *)regs, regs->a0);
+ break;
+ case SBI_RESUME_ENCLAVE:
+ ret = sm_resume_enclave((uintptr_t *)regs, regs->a0);
+ break;
+ case SBI_DESTROY_ENCLAVE:
+ ret = sm_destroy_enclave((uintptr_t *)regs, regs->a0);
+ break;
+ case SBI_ATTEST_ENCLAVE:
+ ret = -1;
+ sbi_printf("[Penglai@Monitor] attest interface not supported yet\n");
+ break;
+ default:
+ sbi_printf("[Penglai@Monitor] host interface(funcid:%ld) not supported yet\n", funcid);
+ ret = SBI_ENOTSUPP;
+ }
+ //((struct sbi_trap_regs *)regs)->mepc = csr_read(CSR_MEPC);
+ //((struct sbi_trap_regs *)regs)->mstatus = csr_read(CSR_MSTATUS);
+ *out_val = ret;
+ return ret;
+}
+
+struct sbi_ecall_extension ecall_penglai_host = {
+ .extid_start = SBI_EXT_PENGLAI_HOST,
+ .extid_end = SBI_EXT_PENGLAI_HOST,
+ .handle = sbi_ecall_penglai_host_handler,
+};
+
+static int sbi_ecall_penglai_enclave_handler(unsigned long extid, unsigned long funcid,
+ const struct sbi_trap_regs *regs, unsigned long *out_val,
+ struct sbi_trap_info *out_trap)
+{
+ uintptr_t ret = 0;
+
+ //csr_write(CSR_MEPC, regs->mepc + 4);
+ ((struct sbi_trap_regs *)regs)->mepc += 4;
+
+ switch (funcid) {
+ // The following is the Penglai's Handler
+ case SBI_EXIT_ENCLAVE:
+ ret = sm_exit_enclave((uintptr_t *)regs, regs->a0);
+ break;
+ default:
+ sbi_printf("[Penglai@Monitor] enclave interface(funcid:%ld) not supported yet\n", funcid);
+ ret = SBI_ENOTSUPP;
+ }
+ *out_val = ret;
+ return ret;
+}
+
+struct sbi_ecall_extension ecall_penglai_enclave = {
+ .extid_start = SBI_EXT_PENGLAI_ENCLAVE,
+ .extid_end = SBI_EXT_PENGLAI_ENCLAVE,
+ .handle = sbi_ecall_penglai_enclave_handler,
+};
diff --git a/lib/sbi/sbi_hart.c b/lib/sbi/sbi_hart.c
index fc86e9f..9bd0499 100644
--- a/lib/sbi/sbi_hart.c
+++ b/lib/sbi/sbi_hart.c
@@ -21,6 +21,7 @@
#include <sbi/sbi_platform.h>
#include <sbi/sbi_string.h>
#include <sbi/sbi_trap.h>
+#include <sm/sm.h>
extern void __sbi_expected_trap(void);
extern void __sbi_expected_trap_hext(void);
@@ -529,6 +530,9 @@ sbi_hart_switch_mode(unsigned long arg0, unsigned long arg1,
}
}
+ //Init Penglai SM here
+ sm_init();
+
register unsigned long a0 asm("a0") = arg0;
register unsigned long a1 asm("a1") = arg1;
__asm__ __volatile__("mret" : : "r"(a0), "r"(a1));
diff --git a/lib/sbi/sbi_init.c b/lib/sbi/sbi_init.c
index 0e82458..68c21f5 100644
--- a/lib/sbi/sbi_init.c
+++ b/lib/sbi/sbi_init.c
@@ -23,6 +23,7 @@
#include <sbi/sbi_string.h>
#include <sbi/sbi_timer.h>
#include <sbi/sbi_tlb.h>
+#include <sbi/sbi_pmp.h>
#include <sbi/sbi_version.h>
#define BANNER \
@@ -41,9 +42,9 @@ static void sbi_boot_print_banner(struct sbi_scratch *scratch)
return;
#ifdef OPENSBI_VERSION_GIT
- sbi_printf("\nOpenSBI %s\n", OPENSBI_VERSION_GIT);
+ sbi_printf("\nOpenSBI %s (with Penglai TEE)\n", OPENSBI_VERSION_GIT);
#else
- sbi_printf("\nOpenSBI v%d.%d\n", OPENSBI_VERSION_MAJOR,
+ sbi_printf("\nOpenSBI v%d.%d (with Penglai TEE)\n", OPENSBI_VERSION_MAJOR,
OPENSBI_VERSION_MINOR);
#endif
@@ -252,6 +253,13 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
sbi_hart_hang();
}
+ /* Penglai PMP init for synchronize PMP settings among Harts */
+ rc = sbi_pmp_init(scratch, TRUE);
+ if (rc) {
+ sbi_printf("%s: (penglai) pmp init failed (error %d)\n", __func__, rc);
+ sbi_hart_hang();
+ }
+
rc = sbi_timer_init(scratch, TRUE);
if (rc) {
sbi_printf("%s: timer init failed (error %d)\n", __func__, rc);
@@ -281,6 +289,11 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
sbi_boot_print_domains(scratch);
+ /*
+ * Note (DD):
+ * In our case, the PMP set by domain will be erased, as penglai
+ * will take control of PMP
+ * */
rc = sbi_hart_pmp_configure(scratch);
if (rc) {
sbi_printf("%s: PMP configure failed (error %d)\n",
@@ -301,6 +314,8 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
sbi_boot_print_hart(scratch, hartid);
+ sbi_printf("[Penglai] Penglai Enclave Preparing\n");
+
wake_coldboot_harts(scratch, hartid);
init_count = sbi_scratch_offset_ptr(scratch, init_count_offset);
@@ -346,10 +361,21 @@ static void __noreturn init_warmboot(struct sbi_scratch *scratch, u32 hartid)
if (rc)
sbi_hart_hang();
+ rc = sbi_pmp_init(scratch, FALSE);
+ if (rc) {
+ sbi_printf("%s: (penglai) pmp init failed (error %d)\n", __func__, rc);
+ sbi_hart_hang();
+ }
+
rc = sbi_timer_init(scratch, FALSE);
if (rc)
sbi_hart_hang();
+ /*
+ * Note (DD):
+ * In our case, the PMP set by domain will be erased, as penglai
+ * will take control of PMP
+ * */
rc = sbi_hart_pmp_configure(scratch);
if (rc)
sbi_hart_hang();
diff --git a/lib/sbi/sbi_pmp.c b/lib/sbi/sbi_pmp.c
new file mode 100644
index 0000000..935ca7b
--- /dev/null
+++ b/lib/sbi/sbi_pmp.c
@@ -0,0 +1,123 @@
+#include <sbi/sbi_pmp.h>
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_atomic.h>
+#include <sbi/riscv_barrier.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_fifo.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_ipi.h>
+#include <sbi/sbi_scratch.h>
+#include <sbi/sbi_tlb.h>
+#include <sbi/sbi_hfence.h>
+#include <sbi/sbi_string.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_hartmask.h>
+
+static unsigned long pmp_data_offset;
+static unsigned long pmp_sync_offset;
+
+static void sbi_process_pmp(struct sbi_scratch *scratch)
+{
+ struct pmp_data_t *data = sbi_scratch_offset_ptr(scratch, pmp_data_offset);
+ struct pmp_config_t pmp_config = *(struct pmp_config_t*)(data);
+ struct sbi_scratch *rscratch = NULL;
+ u32 rhartid;
+ unsigned long *pmp_sync = NULL;
+ int pmp_idx = data->pmp_idx_arg;
+ set_pmp(pmp_idx, pmp_config);
+
+ //sync
+ sbi_hartmask_for_each_hart(rhartid, &data->smask) {
+ rscratch = sbi_hartid_to_scratch(rhartid);
+ if (!rscratch)
+ continue;
+ pmp_sync = sbi_scratch_offset_ptr(rscratch, pmp_sync_offset);
+ while (atomic_raw_xchg_ulong(pmp_sync, 1));
+ }
+}
+
+static int sbi_update_pmp(struct sbi_scratch *scratch,
+ struct sbi_scratch *remote_scratch,
+ u32 remote_hartid, void *data)
+{
+ struct pmp_data_t *pmp_data = NULL;
+ int pmp_idx = 0;
+ u32 curr_hartid = current_hartid();
+
+ if (remote_hartid == curr_hartid) {
+ //update the pmp register locally
+ struct pmp_config_t pmp_config = *(struct pmp_config_t*)(data);
+ pmp_idx = ((struct pmp_data_t *)data)->pmp_idx_arg;
+ set_pmp(pmp_idx, pmp_config);
+ return -1;
+ }
+
+ pmp_data = sbi_scratch_offset_ptr(remote_scratch, pmp_data_offset);
+ //update the remote hart pmp data
+ sbi_memcpy(pmp_data, data, sizeof(struct pmp_data_t));
+
+ return 0;
+}
+
+static void sbi_pmp_sync(struct sbi_scratch *scratch)
+{
+ unsigned long *pmp_sync =
+ sbi_scratch_offset_ptr(scratch, pmp_sync_offset);
+ //wait the remote hart process the pmp signal
+ while (!atomic_raw_xchg_ulong(pmp_sync, 0));
+ return;
+}
+
+static struct sbi_ipi_event_ops pmp_ops = {
+ .name = "IPI_PMP",
+ .update = sbi_update_pmp,
+ .sync = sbi_pmp_sync,
+ .process = sbi_process_pmp,
+};
+
+static u32 pmp_event = SBI_IPI_EVENT_MAX;
+
+int sbi_send_pmp(ulong hmask, ulong hbase, struct pmp_data_t* pmp_data)
+{
+ return sbi_ipi_send_many(hmask, hbase, pmp_event, pmp_data);
+}
+
+int sbi_pmp_init(struct sbi_scratch *scratch, bool cold_boot)
+{
+ int ret;
+ struct pmp_data_t *pmpdata;
+ unsigned long *pmp_sync;
+
+ if (cold_boot) {
+ //Define the pmp data offset in the scratch
+ pmp_data_offset = sbi_scratch_alloc_offset(sizeof(*pmpdata),
+ "PMP_DATA");
+ if (!pmp_data_offset)
+ return SBI_ENOMEM;
+
+ pmp_sync_offset = sbi_scratch_alloc_offset(sizeof(*pmp_sync),
+ "PMP_SYNC");
+ if (!pmp_sync_offset)
+ return SBI_ENOMEM;
+
+ pmpdata = sbi_scratch_offset_ptr(scratch,
+ pmp_data_offset);
+
+ pmp_sync = sbi_scratch_offset_ptr(scratch,
+ pmp_sync_offset);
+
+ *pmp_sync = 0;
+
+ ret = sbi_ipi_event_create(&pmp_ops);
+ if (ret < 0) {
+ sbi_scratch_free_offset(pmp_data_offset);
+ return ret;
+ }
+ pmp_event = ret;
+ } else {
+ //do nothing for warmboot
+ }
+
+ return 0;
+}
diff --git a/lib/sbi/sbi_trap.c b/lib/sbi/sbi_trap.c
index b7349d2..110292b 100644
--- a/lib/sbi/sbi_trap.c
+++ b/lib/sbi/sbi_trap.c
@@ -20,6 +20,8 @@
#include <sbi/sbi_timer.h>
#include <sbi/sbi_trap.h>
+#include <sm/sm.h>
+
static void __noreturn sbi_trap_error(const char *msg, int rc,
ulong mcause, ulong mtval, ulong mtval2,
ulong mtinst, struct sbi_trap_regs *regs)
@@ -228,6 +230,9 @@ void sbi_trap_handler(struct sbi_trap_regs *regs)
switch (mcause) {
case IRQ_M_TIMER:
sbi_timer_process();
+ if (check_in_enclave_world() >=0) { //handle timer for enclaves
+ sm_do_timer_irq( (uintptr_t *)regs, mcause, regs->mepc);
+ }
break;
case IRQ_M_SOFT:
sbi_ipi_process();
@@ -252,6 +257,15 @@ void sbi_trap_handler(struct sbi_trap_regs *regs)
rc = sbi_misaligned_store_handler(mtval, mtval2, mtinst, regs);
msg = "misaligned store handler failed";
break;
+ case CAUSE_USER_ECALL:
+ //The only case for USER_ECALL is issued by Penglai Enclave now
+ if (check_in_enclave_world() <0) {
+ sbi_printf("[Penglai] Error, user ecall not in enclaves\n");
+ rc = -1;
+ break;
+ } else {// continue to sbi_ecall_handler
+ //sbi_printf("[Penglai] ecall from enclaves\n");
+ }
case CAUSE_SUPERVISOR_ECALL:
case CAUSE_MACHINE_ECALL:
rc = sbi_ecall_handler(regs);
diff --git a/lib/sbi/sm/.gitignore b/lib/sbi/sm/.gitignore
new file mode 100644
index 0000000..751553b
--- /dev/null
+++ b/lib/sbi/sm/.gitignore
@@ -0,0 +1 @@
+*.bak
diff --git a/lib/sbi/sm/enclave.c b/lib/sbi/sm/enclave.c
new file mode 100644
index 0000000..a93c04c
--- /dev/null
+++ b/lib/sbi/sm/enclave.c
@@ -0,0 +1,796 @@
+#include <sm/print.h>
+#include <sm/enclave.h>
+#include <sm/sm.h>
+#include <sm/math.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/sbi_string.h>
+#include <sbi/riscv_locks.h>
+#include <sm/platform/pmp/platform.h>
+#include <sm/utils.h>
+#include <sbi/sbi_timer.h>
+
+static struct cpu_state_t cpus[MAX_HARTS] = {{0,}, };
+
+//spinlock
+static spinlock_t enclave_metadata_lock = SPIN_LOCK_INITIALIZER;
+
+//enclave metadata
+struct link_mem_t* enclave_metadata_head = NULL;
+struct link_mem_t* enclave_metadata_tail = NULL;
+
+uintptr_t copy_from_host(void* dest, void* src, size_t size)
+{
+ /* TODO: checking */
+ sbi_memcpy(dest, src, size);
+ return 0;
+}
+
+uintptr_t copy_to_host(void* dest, void* src, size_t size)
+{
+ /* TODO: checking */
+ sbi_memcpy(dest, src, size);
+ return 0;
+}
+
+int copy_word_to_host(unsigned int* ptr, uintptr_t value)
+{
+ /* TODO: checking */
+ *ptr = value;
+ return 0;
+}
+
+static void enter_enclave_world(int eid)
+{
+ cpus[csr_read(CSR_MHARTID)].in_enclave = ENCLAVE_MODE;
+ cpus[csr_read(CSR_MHARTID)].eid = eid;
+
+ platform_enter_enclave_world();
+}
+
+static int get_enclave_id()
+{
+ return cpus[csr_read(CSR_MHARTID)].eid;
+}
+
+static void exit_enclave_world()
+{
+ cpus[csr_read(CSR_MHARTID)].in_enclave = 0;
+ cpus[csr_read(CSR_MHARTID)].eid = -1;
+
+ platform_exit_enclave_world();
+}
+
+int check_in_enclave_world()
+{
+ if(!(cpus[csr_read(CSR_MHARTID)].in_enclave))
+ return -1;
+
+ if(platform_check_in_enclave_world() < 0)
+ return -1;
+
+ return 0;
+}
+
+static int check_enclave_authentication()
+{
+ if(platform_check_enclave_authentication() < 0)
+ return -1;
+
+ return 0;
+}
+
+static void switch_to_enclave_ptbr(struct thread_state_t* thread, uintptr_t ptbr)
+{
+ platform_switch_to_enclave_ptbr(thread, ptbr);
+}
+
+static void switch_to_host_ptbr(struct thread_state_t* thread, uintptr_t ptbr)
+{
+ platform_switch_to_host_ptbr(thread, ptbr);
+}
+
+struct link_mem_t* init_mem_link(unsigned long mem_size, unsigned long slab_size)
+{
+ struct link_mem_t* head;
+
+ head = (struct link_mem_t*)mm_alloc(mem_size, NULL);
+
+ if (head == NULL)
+ return NULL;
+ else
+ sbi_memset((void*)head, 0, mem_size);
+
+ head->mem_size = mem_size;
+ head->slab_size = slab_size;
+ head->slab_num = (mem_size - sizeof(struct link_mem_t)) / slab_size;
+ void* align_addr = (char*)head + sizeof(struct link_mem_t);
+ head->addr = (char*)size_up_align((unsigned long)align_addr, slab_size);
+ head->next_link_mem = NULL;
+
+ return head;
+}
+
+struct link_mem_t* add_link_mem(struct link_mem_t** tail)
+{
+ struct link_mem_t* new_link_mem;
+
+ new_link_mem = (struct link_mem_t*)mm_alloc((*tail)->mem_size, NULL);
+
+ if (new_link_mem == NULL)
+ return NULL;
+ else
+ sbi_memset((void*)new_link_mem, 0, (*tail)->mem_size);
+
+ (*tail)->next_link_mem = new_link_mem;
+ new_link_mem->mem_size = (*tail)->mem_size;
+ new_link_mem->slab_num = (*tail)->slab_num;
+ new_link_mem->slab_size = (*tail)->slab_size;
+ void* align_addr = (char*)new_link_mem + sizeof(struct link_mem_t);
+ new_link_mem->addr = (char*)size_up_align((unsigned long)align_addr, (*tail)->slab_size);
+ new_link_mem->next_link_mem = NULL;
+
+ return new_link_mem;
+}
+
+int remove_link_mem(struct link_mem_t** head, struct link_mem_t* ptr)
+{
+ struct link_mem_t *cur_link_mem, *tmp_link_mem;
+ int retval =0;
+
+ cur_link_mem = *head;
+ if (cur_link_mem == ptr)
+ {
+ *head = cur_link_mem->next_link_mem;
+ mm_free(cur_link_mem, cur_link_mem->mem_size);
+ return 1;
+ }
+
+ for (cur_link_mem = *head; cur_link_mem != NULL; cur_link_mem = cur_link_mem->next_link_mem)
+ {
+ if (cur_link_mem->next_link_mem == ptr)
+ {
+ tmp_link_mem = cur_link_mem->next_link_mem;
+ cur_link_mem->next_link_mem = cur_link_mem->next_link_mem->next_link_mem;
+ //FIXME
+ mm_free(tmp_link_mem, tmp_link_mem->mem_size);
+ return retval;
+ }
+ }
+
+ return retval;
+}
+
+/*
+ * alloc an enclave struct now, which is zeroed
+ * Note: do not acquire metadata lock before the function!
+ * */
+static struct enclave_t* alloc_enclave()
+{
+ struct link_mem_t *cur, *next;
+ struct enclave_t* enclave = NULL;
+ int i, found, eid;
+
+ spin_lock(&enclave_metadata_lock);
+
+ //enclave metadata list hasn't been initialized yet
+ if(enclave_metadata_head == NULL)
+ {
+ enclave_metadata_head = init_mem_link(ENCLAVE_METADATA_REGION_SIZE, sizeof(struct enclave_t));
+ if(!enclave_metadata_head)
+ {
+ printm("[Penglai Monitor@%s] don't have enough mem\r\n", __func__);
+ goto alloc_eid_out;
+ }
+ enclave_metadata_tail = enclave_metadata_head;
+ }
+
+ found = 0;
+ eid = 0;
+ for(cur = enclave_metadata_head; cur != NULL; cur = cur->next_link_mem)
+ {
+ for(i = 0; i < (cur->slab_num); i++)
+ {
+ enclave = (struct enclave_t*)(cur->addr) + i;
+ if(enclave->state == INVALID)
+ {
+ sbi_memset((void*)enclave, 0, sizeof(struct enclave_t));
+ enclave->state = FRESH;
+ enclave->eid = eid;
+ found = 1;
+ break;
+ }
+ eid++;
+ }
+ if(found)
+ break;
+ }
+
+ //don't have enough enclave metadata
+ if(!found)
+ {
+ next = add_link_mem(&enclave_metadata_tail);
+ if(next == NULL)
+ {
+ printm("[Penglai Monitor@%s] don't have enough mem\r\n", __func__);
+ enclave = NULL;
+ goto alloc_eid_out;
+ }
+ enclave = (struct enclave_t*)(next->addr);
+ sbi_memset((void*)enclave, 0, sizeof(struct enclave_t));
+ enclave->state = FRESH;
+ enclave->eid = eid;
+ }
+
+alloc_eid_out:
+ spin_unlock(&enclave_metadata_lock);
+ return enclave;
+}
+
+static int free_enclave(int eid)
+{
+ struct link_mem_t *cur;
+ struct enclave_t *enclave = NULL;
+ int found, count, ret_val;
+
+ spin_lock(&enclave_metadata_lock);
+
+ found = 0;
+ count = 0;
+ for(cur = enclave_metadata_head; cur != NULL; cur = cur->next_link_mem)
+ {
+ if(eid < (count + cur->slab_num))
+ {
+ enclave = (struct enclave_t*)(cur->addr) + (eid - count);
+ sbi_memset((void*)enclave, 0, sizeof(struct enclave_t));
+ enclave->state = INVALID;
+ found = 1;
+ ret_val = 0;
+ break;
+ }
+ count += cur->slab_num;
+ }
+
+ //haven't alloc this eid
+ if(!found)
+ {
+ printm("[Penglai Monitor@%s] haven't alloc this eid\r\n", __func__);
+ ret_val = -1;
+ }
+
+ spin_unlock(&enclave_metadata_lock);
+
+ return ret_val;
+}
+
+struct enclave_t* get_enclave(int eid)
+{
+ struct link_mem_t *cur;
+ struct enclave_t *enclave;
+ int found, count;
+
+ spin_lock(&enclave_metadata_lock);
+
+ found = 0;
+ count = 0;
+ for(cur = enclave_metadata_head; cur != NULL; cur = cur->next_link_mem)
+ {
+ if(eid < (count + cur->slab_num))
+ {
+ enclave = (struct enclave_t*)(cur->addr) + (eid - count);
+ found = 1;
+ break;
+ }
+
+ count += cur->slab_num;
+ }
+
+ //haven't alloc this eid
+ if(!found)
+ {
+ printm("[Penglai Monitor@%s] haven't alloc this enclave\r\n", __func__);
+ enclave = NULL;
+ }
+
+ spin_unlock(&enclave_metadata_lock);
+ return enclave;
+}
+
+int swap_from_host_to_enclave(uintptr_t* host_regs, struct enclave_t* enclave)
+{
+ //grant enclave access to memory
+ if(grant_enclave_access(enclave) < 0)
+ return -1;
+
+ //save host context
+ swap_prev_state(&(enclave->thread_context), host_regs);
+
+ //different platforms have different ptbr switch methods
+ switch_to_enclave_ptbr(&(enclave->thread_context), enclave->thread_context.encl_ptbr);
+
+ /*
+ * save host cache binding
+ * only workable when the hardware supports the feature
+ */
+#if 0
+ swap_prev_cache_binding(&enclave -> threads[0], read_csr(0x356));
+#endif
+
+ // disable interrupts
+ swap_prev_mie(&(enclave->thread_context), csr_read(CSR_MIE));
+
+ // clear pending interrupts
+ csr_read_clear(CSR_MIP, MIP_MTIP);
+ csr_read_clear(CSR_MIP, MIP_STIP);
+ csr_read_clear(CSR_MIP, MIP_SSIP);
+ csr_read_clear(CSR_MIP, MIP_SEIP);
+
+ //disable interrupts/exceptions delegation
+ swap_prev_mideleg(&(enclave->thread_context), csr_read(CSR_MIDELEG));
+ swap_prev_medeleg(&(enclave->thread_context), csr_read(CSR_MEDELEG));
+
+ // swap the mepc to transfer control to the enclave
+ // This will be overwritten by the entry-address in the case of run_enclave
+ //swap_prev_mepc(&(enclave->thread_context), csr_read(CSR_MEPC));
+ swap_prev_mepc(&(enclave->thread_context), host_regs[32]);
+ host_regs[32] = csr_read(CSR_MEPC); //update the new value to host_regs
+
+ //set return address to enclave
+
+ //set mstatus to transfer control to u mode
+ uintptr_t mstatus = host_regs[33]; //In OpenSBI, we use regs to change mstatus
+ mstatus = INSERT_FIELD(mstatus, MSTATUS_MPP, PRV_U);
+ host_regs[33] = mstatus;
+
+ //mark that cpu is in enclave world now
+ enter_enclave_world(enclave->eid);
+
+ __asm__ __volatile__ ("sfence.vma" : : : "memory");
+
+ return 0;
+}
+
+int swap_from_enclave_to_host(uintptr_t* regs, struct enclave_t* enclave)
+{
+ //retrieve enclave access to memory
+ retrieve_enclave_access(enclave);
+
+ //restore host context
+ swap_prev_state(&(enclave->thread_context), regs);
+
+ //restore host's ptbr
+ switch_to_host_ptbr(&(enclave->thread_context), enclave->host_ptbr);
+
+ //TODO: restore host cache binding
+ //swap_prev_cache_binding(&(enclave->thread_context), );
+
+ //restore interrupts
+ swap_prev_mie(&(enclave->thread_context), csr_read(CSR_MIE));
+
+ //restore interrupts/exceptions delegation
+ swap_prev_mideleg(&(enclave->thread_context), csr_read(CSR_MIDELEG));
+ swap_prev_medeleg(&(enclave->thread_context), csr_read(CSR_MEDELEG));
+
+ //transfer control back to kernel
+ //swap_prev_mepc(&(enclave->thread_context), read_csr(mepc));
+ //regs[32] = (uintptr_t)(enclave->thread_context.prev_mepc); //In OpenSBI, we use regs to change mepc
+ swap_prev_mepc(&(enclave->thread_context), regs[32]);
+ regs[32] = csr_read(CSR_MEPC); //update the new value to host_regs
+
+ //restore mstatus
+#if 0
+ uintptr_t mstatus = read_csr(mstatus);
+ mstatus = INSERT_FIELD(mstatus, MSTATUS_MPP, PRV_S);
+ write_csr(mstatus, mstatus);
+#else
+ uintptr_t mstatus = regs[33]; //In OpenSBI, we use regs to change mstatus
+ mstatus = INSERT_FIELD(mstatus, MSTATUS_MPP, PRV_S);
+ regs[33] = mstatus;
+#endif
+
+ //mark that cpu is out of enclave world now
+ exit_enclave_world();
+
+ __asm__ __volatile__ ("sfence.vma" : : : "memory");
+
+ return 0;
+}
+
+uintptr_t create_enclave(struct enclave_sbi_param_t create_args)
+{
+ struct enclave_t* enclave;
+
+ enclave = alloc_enclave();
+ if(!enclave)
+ {
+ printm("[Penglai Monitor@%s] enclave allocation is failed \r\n", __func__);
+ return -1UL;
+ }
+
+ //TODO: check whether enclave memory is out of bound
+ //TODO: verify enclave page table layout
+
+ spin_lock(&enclave_metadata_lock);
+
+ enclave->paddr = create_args.paddr;
+ enclave->size = create_args.size;
+ enclave->entry_point = create_args.entry_point;
+ enclave->untrusted_ptr = create_args.untrusted_ptr;
+ enclave->untrusted_size = create_args.untrusted_size;
+ enclave->free_mem = create_args.free_mem;
+ enclave->ocall_func_id = create_args.ecall_arg0;
+ enclave->ocall_arg0 = create_args.ecall_arg1;
+ enclave->ocall_arg1 = create_args.ecall_arg2;
+ enclave->ocall_syscall_num = create_args.ecall_arg3;
+ enclave->host_ptbr = csr_read(CSR_SATP);
+ enclave->thread_context.encl_ptbr = (create_args.paddr >> (RISCV_PGSHIFT) | SATP_MODE_CHOICE);
+ enclave->root_page_table = (unsigned long*)create_args.paddr;
+ enclave->state = FRESH;
+
+ //Dump the PT here, for debug
+#if 0
+ printm("[Penglai@%s], Dump PT for created enclave\n", __func__);
+ dump_pt(enclave->root_page_table, 1);
+#endif
+
+ spin_unlock(&enclave_metadata_lock);
+ printm("[Penglai@%s] paddr:0x%lx, size:0x%lx, entry:0x%lx\n"
+ "untrusted ptr:0x%lx host_ptbr:0x%lx, pt:0x%ln\n"
+ "thread_context.encl_ptbr:0x%lx\n cur_satp:0x%lx\n",
+ __func__, enclave->paddr, enclave->size, enclave->entry_point,
+ enclave->untrusted_ptr, enclave->host_ptbr, enclave->root_page_table,
+ enclave->thread_context.encl_ptbr, csr_read(CSR_SATP));
+
+ copy_word_to_host((unsigned int*)create_args.eid_ptr, enclave->eid);
+ printm("[Penglai Monitor@%s] return eid:%d\n",
+ __func__, enclave->eid);
+
+ return 0;
+}
+
+uintptr_t run_enclave(uintptr_t* regs, unsigned int eid)
+{
+ struct enclave_t* enclave;
+ uintptr_t retval = 0;
+
+ enclave = get_enclave(eid);
+ if (!enclave)
+ {
+ printm_err("[Penglai Monitor@%s] wrong enclave id\r\n", __func__);
+ return -1UL;
+ }
+
+ spin_lock(&enclave_metadata_lock);
+
+ if (enclave->state != FRESH)
+ {
+ printm_err("[Penglai Monitor@%s] enclave is not initialized or already used\r\n", __func__);
+ retval = -1UL;
+ goto run_enclave_out;
+ }
+ if (enclave->host_ptbr != csr_read(CSR_SATP))
+ {
+ printm_err("[Penglai Monitor@%s] enclave doesn't belong to current host process\r\n", __func__);
+ retval = -1UL;
+ goto run_enclave_out;
+ }
+
+ if (swap_from_host_to_enclave(regs, enclave) < 0)
+ {
+ printm("[Penglai Monitor@%s] enclave can not be run\r\n", __func__);
+ retval = -1UL;
+ goto run_enclave_out;
+ }
+
+ //swap_prev_mepc(&(enclave->thread_context), regs[32]);
+ regs[32] = (uintptr_t)(enclave->entry_point); //In OpenSBI, we use regs to change mepc
+
+ //TODO: enable timer interrupt
+ csr_read_set(CSR_MIE, MIP_MTIP);
+
+ //set default stack
+ regs[2] = ENCLAVE_DEFAULT_STACK;
+
+ //pass parameters
+ regs[11] = (uintptr_t)enclave->entry_point;
+ regs[12] = (uintptr_t)enclave->untrusted_ptr;
+ regs[13] = (uintptr_t)enclave->untrusted_size;
+
+ enclave->state = RUNNING;
+
+run_enclave_out:
+ spin_unlock(&enclave_metadata_lock);
+ return retval;
+}
+
+uintptr_t stop_enclave(uintptr_t* regs, unsigned int eid)
+{
+ uintptr_t retval = 0;
+ struct enclave_t *enclave = get_enclave(eid);
+ if(!enclave)
+ {
+ printm_err("[Penglai Monitor@%s] wrong enclave id%d\r\n", __func__, eid);
+ return -1UL;
+ }
+
+ spin_lock(&enclave_metadata_lock);
+
+ if(enclave->host_ptbr != csr_read(CSR_SATP))
+ {
+ printm_err("[Penglai Monitor@%s] enclave doesn't belong to current host process\r\n", __func__);
+ retval = -1UL;
+ goto stop_enclave_out;
+ }
+
+ if(enclave->state <= FRESH)
+ {
+ printm_err("[Penglai Monitor@%s] enclave%d hasn't begin running at all\r\n", __func__, eid);
+ retval = -1UL;
+ goto stop_enclave_out;
+ }
+
+ if(enclave->state == STOPPED || enclave-> state == DESTROYED)
+ {
+ printm_err("[Penglai Monitor@%s] enclave%d already stopped/destroyed\r\n", __func__, eid);
+ retval = -1UL;
+ goto stop_enclave_out;
+ }
+
+ /* The real-stop happen when the enclave traps into the monitor */
+ enclave->state = STOPPED;
+
+stop_enclave_out:
+ spin_unlock(&enclave_metadata_lock);
+ return retval;
+}
+
+uintptr_t destroy_enclave(uintptr_t* regs, unsigned int eid)
+{
+ uintptr_t retval = 0;
+ struct enclave_t *enclave = get_enclave(eid);
+ if(!enclave)
+ {
+ printm_err("[Penglai Monitor@%s] wrong enclave id%d\r\n", __func__, eid);
+ return -1UL;
+ }
+
+ spin_lock(&enclave_metadata_lock);
+
+ if (enclave->host_ptbr != csr_read(CSR_SATP))
+ {
+ printm_err("[Penglai Monitor@%s] enclave doesn't belong to current host process"
+ "enclave->host_ptbr:0x%lx, csr_satp:0x%lx\r\n", __func__, enclave->host_ptbr, csr_read(CSR_SATP));
+ retval = -1UL;
+ goto out;
+ }
+
+ if (enclave->state < FRESH)
+ {
+ printm_err("[Penglai Monitor@%s] enclave%d hasn't created\r\n", __func__, eid);
+ retval = -1UL;
+ goto out;
+ }
+
+ /*
+ * If the enclave is stopped or fresh, it will never goto the timer trap handler,
+ * we should destroy the enclave immediately
+ * */
+ //if (enclave->state == STOPPED || enclave->state == FRESH) {
+ if (enclave->state == FRESH) {
+ sbi_memset((void*)(enclave->paddr), 0, enclave->size);
+ mm_free((void*)(enclave->paddr), enclave->size);
+
+ spin_unlock(&enclave_metadata_lock);
+
+ //free enclave struct
+ retval = free_enclave(eid); //the enclave state will be set INVALID here
+ return retval;
+ }
+ //FIXME: what if the enclave->state is RUNNABLE now?
+
+ /* The real-destroy happen when the enclave traps into the monitor */
+ enclave->state = DESTROYED;
+out:
+ spin_unlock(&enclave_metadata_lock);
+ return retval;
+}
+
+uintptr_t resume_from_stop(uintptr_t* regs, unsigned int eid)
+{
+ uintptr_t retval = 0;
+ struct enclave_t* enclave = get_enclave(eid);
+
+ if (!enclave)
+ {
+ printm("[Penglai Monitor@%s] wrong enclave id%d\r\n", __func__, eid);
+ return -1UL;
+ }
+
+ spin_lock(&enclave_metadata_lock);
+ if(enclave->host_ptbr != csr_read(CSR_SATP))
+ {
+ printm("[Penglai Monitor@%s] enclave doesn't belong to current host process\r\n", __func__);
+ retval = -1UL;
+ goto resume_from_stop_out;
+ }
+
+ if(enclave->state != STOPPED)
+ {
+ printm("[Penglai Monitor@%s] enclave doesn't belong to current host process\r\n", __func__);
+ retval = -1UL;
+ goto resume_from_stop_out;
+ }
+
+ enclave->state = RUNNABLE;
+ printm("[Penglai Monitor@%s] encalve-%d turns to runnable now!\n", __func__, eid);
+
+resume_from_stop_out:
+ spin_unlock(&enclave_metadata_lock);
+ return retval;
+}
+
+/*
+ * Resume a suspended enclave on the current hart.
+ * Returns:
+ * - ENCLAVE_TIMER_IRQ if the enclave is STOPPED (host should retry later)
+ * - ENCLAVE_SUCCESS if the enclave was destroyed meanwhile (memory freed here)
+ * - the enclave's saved a0 (regs[10]) after switching into enclave context
+ * - -1UL on error (bad eid, wrong host, not runnable, or swap failure)
+ */
+uintptr_t resume_enclave(uintptr_t* regs, unsigned int eid)
+{
+ uintptr_t retval = 0;
+ struct enclave_t* enclave = get_enclave(eid);
+ if(!enclave)
+ {
+ printm("[Penglai Monitor@%s] wrong enclave id%d\r\n", __func__, eid);
+ return -1UL;
+ }
+
+ spin_lock(&enclave_metadata_lock);
+
+ //only the host process that created the enclave may resume it
+ if(enclave->host_ptbr != csr_read(CSR_SATP))
+ {
+ printm("[Penglai Monitor@%s] enclave doesn't belong to current host process\r\n", __func__);
+ retval = -1UL;
+ goto resume_enclave_out;
+ }
+
+ if(enclave->state == STOPPED)
+ {
+ retval = ENCLAVE_TIMER_IRQ;
+ goto resume_enclave_out;
+ }
+
+ //deferred destruction: destroy_enclave() only marks the state; the
+ //scrub/free happens here, on the next resume attempt
+ if (enclave->state == DESTROYED) {
+ sbi_memset((void*)(enclave->paddr), 0, enclave->size);
+ mm_free((void*)(enclave->paddr), enclave->size);
+
+ spin_unlock(&enclave_metadata_lock);
+
+ //free enclave struct
+ free_enclave(eid); //the enclave state will be set INVALID here
+ return ENCLAVE_SUCCESS; //this will break the infinite loop in the enclave-driver
+ }
+
+ if(enclave->state != RUNNABLE)
+ {
+ printm("[Penglai Monitor@%s] enclave%d is not runnable\r\n", __func__, eid);
+ retval = -1UL;
+ goto resume_enclave_out;
+ }
+
+ if(swap_from_host_to_enclave(regs, enclave) < 0)
+ {
+ printm("[Penglai Monitor@%s] enclave can not be run\r\n", __func__);
+ retval = -1UL;
+ goto resume_enclave_out;
+ }
+
+ enclave->state = RUNNING;
+
+ //regs[10] will be set to retval when mcall_trap return, so we have to
+ //set retval to be regs[10] here to successfully restore context
+ //TODO: retval should be set to indicate success or fail when resume from ocall
+ retval = regs[10];
+
+ //enable timer interrupt
+ csr_read_set(CSR_MIE, MIP_MTIP);
+
+resume_enclave_out:
+ spin_unlock(&enclave_metadata_lock);
+ return retval;
+}
+
+/*
+ * Handle an exit request issued from inside an enclave: switch back to the
+ * host context, scrub and free the enclave's memory, and release its slot.
+ * 'retval' is the enclave's exit value (only logged here).
+ * The calling hart must currently be in enclave world.
+ */
+uintptr_t exit_enclave(uintptr_t* regs, unsigned long retval)
+{
+
+ struct enclave_t *enclave;
+ int eid;
+
+ if(check_in_enclave_world() < 0)
+ {
+ printm_err("[Penglai Monitor@%s] cpu is not in enclave world now\r\n", __func__);
+ return -1;
+ }
+ printm_err("[Penglai Monitor@%s] retval of enclave is %lx\r\n", __func__, retval);
+
+ eid = get_enclave_id();
+ enclave = get_enclave(eid);
+ if(!enclave)
+ {
+ printm("[Penglai Monitor@%s] didn't find eid%d 's corresponding enclave\r\n", __func__, eid);
+ return -1UL;
+ }
+
+ spin_lock(&enclave_metadata_lock);
+
+ //make sure the exiting enclave is the one actually bound to this hart
+ if(check_enclave_authentication(enclave) < 0)
+ {
+ printm_err("[Penglai Monitor@%s] current enclave's eid is not %d\r\n", __func__, eid);
+ spin_unlock(&enclave_metadata_lock);
+ return -1UL;
+ }
+
+ swap_from_enclave_to_host(regs, enclave);
+
+ //free enclave's memory
+ //TODO: support multiple memory region
+ sbi_memset((void*)(enclave->paddr), 0, enclave->size);
+ mm_free((void*)(enclave->paddr), enclave->size);
+
+ spin_unlock(&enclave_metadata_lock);
+
+ //free enclave struct
+ free_enclave(eid);
+
+ return 0;
+}
+
+/*
+ * Timer handler for penglai enclaves
+ * In normal case, an enclave will pin a HART and run until it finished.
+ * The exception case is timer interrupt, which will trap into monitor to
+ * check current enclave states.
+ *
+ * If current enclave state is not Running or Runnable, it will be stopped/destroyed
+ *
+ * */
+uintptr_t do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc)
+{
+ uintptr_t retval = 0;
+ unsigned int eid = get_enclave_id();
+ struct enclave_t *enclave = get_enclave(eid);
+ if (!enclave)
+ {
+ printm("[Penglai Monitor@%s] something is wrong with enclave%d\r\n", __func__, eid);
+ return -1UL;
+ }
+
+ spin_lock(&enclave_metadata_lock);
+
+ //NOTE(review): this -1 is always overwritten by one of the branches
+ //below (every state falls into DESTROYED/RUNNING/else); kept for the log
+ if (enclave->state != RUNNING && enclave->state != RUNNABLE)
+ {
+ printm("[Penglai Monitor@%s] Enclave(%d) is not runnable\r\n", __func__, eid);
+ retval = -1;
+ }
+
+ swap_from_enclave_to_host(regs, enclave);
+
+ //deferred destruction requested by destroy_enclave(): do the real
+ //scrub/free now that we are back in host context
+ if (enclave->state == DESTROYED) {
+ sbi_memset((void*)(enclave->paddr), 0, enclave->size);
+ mm_free((void*)(enclave->paddr), enclave->size);
+
+ spin_unlock(&enclave_metadata_lock);
+
+ //free enclave struct
+ retval = free_enclave(eid); //the enclave state will be set INVALID here
+
+ retval = ENCLAVE_SUCCESS; //this means we will not run any more
+ goto timer_irq_out;
+ }else if (enclave->state == RUNNING) {
+ enclave->state = RUNNABLE;
+
+ retval = ENCLAVE_TIMER_IRQ;
+ }else { // The case for STOPPED
+ retval = ENCLAVE_TIMER_IRQ;
+ }
+
+ spin_unlock(&enclave_metadata_lock);
+
+timer_irq_out:
+ /*ret set timer now*/
+ // sbi_timer_event_start(csr_read(CSR_TIME) + ENCLAVE_TIME_CREDITS);
+ return retval;
+}
diff --git a/lib/sbi/sm/platform/README.md b/lib/sbi/sm/platform/README.md
new file mode 100644
index 0000000..f81659b
--- /dev/null
+++ b/lib/sbi/sm/platform/README.md
@@ -0,0 +1,9 @@
+## Platforms
+
+Penglai is designed to naturally support different platforms with their own isolation methods.
+
+Currently, it supports:
+
+- PMP-only platforms: this is suitable for most devices
+- PMP + sPMP/MPU: Penglai can achieve better scalability with sPMP/MPU
+- TVM (or Guarded Paging): please refer to another repo for more details about TVM
diff --git a/lib/sbi/sm/platform/pmp/enclave_mm.c b/lib/sbi/sm/platform/pmp/enclave_mm.c
new file mode 100644
index 0000000..bd9c81d
--- /dev/null
+++ b/lib/sbi/sm/platform/pmp/enclave_mm.c
@@ -0,0 +1,705 @@
+#include <sm/sm.h>
+#include <sm/enclave.h>
+#include <sm/platform/pmp/enclave_mm.h>
+//#include <sm/atomic.h>
+#include <sbi/riscv_atomic.h>
+#include <sbi/riscv_locks.h>
+//#include "mtrap.h"
+#include <sm/math.h>
+#include <sbi/sbi_string.h>
+
+/*
+ * Only NPMP-3 enclave regions are supported.
+ * The last PMP is used to allow kernel to access memory.
+ * The 1st PMP is used to protect security monitor from kernel.
+ * The 2nd PMP is used to allow kernel to configure enclave's page table.
+ * Others, (NPMP-3) PMPs are for enclaves, i.e., secure memory
+ *
+ * TODO: this array can be removed as we can get
+ * existing enclave regions via pmp registers
+ */
+static struct mm_region_t mm_regions[N_PMP_REGIONS];
+static unsigned long pmp_bitmap = 0;
+static spinlock_t pmp_bitmap_lock = SPIN_LOCK_INITIALIZER;
+
+
+/*
+ * Check the validness of the paddr and size:
+ * size must be a power of two of at least one page, and paddr must be
+ * size-aligned — the requirements for a PMP NAPOT region.
+ * Returns 0 if valid, -1 otherwise.
+ * */
+static int check_mem_size(uintptr_t paddr, unsigned long size)
+{
+ //power-of-two test: size & (size-1) clears the lowest set bit
+ if((size == 0) || (size & (size - 1)))
+ {
+ printm_err("pmp size should be 2^power!\r\n");
+ return -1;
+ }
+
+ if(size < RISCV_PGSIZE)
+ {
+ printm_err("pmp size should be no less than one page!\r\n");
+ return -1;
+ }
+
+ if(paddr & (size - 1))
+ {
+ printm_err("pmp size should be %ld aligned!\r\n", size);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * TODO: we should protect kernel temporal region with lock
+ * A possible malicious case:
+ * kernel@Hart-0: acquire memory region, set to PMP-1
+ * kernel@Hart-1: acquire memory region, set to PMP-1 <- this will overlap the prior region
+ * kernel@Hart-0: release memory region <- dangerous behavior now
+ * */
+
+/**
+ * \brief This function grants kernel (temporarily) access to allocated enclave memory
+ * for initializing enclave and configuring page table.
+ *
+ * \param req_paddr start of the region (must be size-aligned)
+ * \param size region size (power of two, >= one page)
+ * \return 0 on success, -1 if the region is not PMP-legal
+ */
+int grant_kernel_access(void* req_paddr, unsigned long size)
+{
+ //pmp1 is used for allowing kernel to access enclave memory
+ int pmp_idx = 1;
+ struct pmp_config_t pmp_config;
+ uintptr_t paddr = (uintptr_t)req_paddr;
+
+ if(check_mem_size(paddr, size) != 0){
+ printm("[Penglai Monitor@%s] check_mem_size failed\n", __func__);
+ return -1;
+ }
+
+ pmp_config.paddr = paddr;
+ pmp_config.size = size;
+ pmp_config.perm = PMP_R | PMP_W | PMP_X;
+ pmp_config.mode = PMP_A_NAPOT;
+ //broadcast to all harts so the kernel may touch this memory anywhere
+ set_pmp_and_sync(pmp_idx, pmp_config);
+
+ return 0;
+}
+
+/*
+ * This function retrieves kernel access to allocated enclave memory:
+ * it verifies that PMP1 currently covers exactly (paddr, size) and then
+ * clears it on all harts. Returns 0 on success, -1 on config mismatch.
+ */
+int retrieve_kernel_access(void* req_paddr, unsigned long size)
+{
+ //pmp1 is used for allowing kernel to access enclave memory
+ int pmp_idx = 1;
+ struct pmp_config_t pmp_config;
+ uintptr_t paddr = (uintptr_t)req_paddr;
+
+ pmp_config = get_pmp(pmp_idx);
+
+ //refuse to clear a PMP slot that does not match the requested region
+ if((pmp_config.mode != PMP_A_NAPOT) || (pmp_config.paddr != paddr) || (pmp_config.size != size))
+ {
+ printm_err("retrieve_kernel_access: error pmp_config\r\n");
+ return -1;
+ }
+
+ clear_pmp_and_sync(pmp_idx);
+
+ return 0;
+}
+
+//grant enclave access to enclave's memory:
+//find the mm_region containing the enclave's memory and open the
+//corresponding PMP entry (local hart only). Returns 0 on success, -1 on error.
+int grant_enclave_access(struct enclave_t* enclave)
+{
+ int region_idx = 0;
+ int pmp_idx = 0;
+ struct pmp_config_t pmp_config;
+
+ if(check_mem_size(enclave->paddr, enclave->size) < 0)
+ return -1;
+
+ //set pmp permission, ensure that enclave's paddr and size is pmp legal
+ //TODO: support multiple memory regions
+ spin_lock(&pmp_bitmap_lock);
+ for(region_idx = 0; region_idx < N_PMP_REGIONS; ++region_idx)
+ {
+ if(mm_regions[region_idx].valid && region_contain(
+ mm_regions[region_idx].paddr, mm_regions[region_idx].size,
+ enclave->paddr, enclave->size))
+ {
+ break;
+ }
+ }
+ spin_unlock(&pmp_bitmap_lock);
+
+ if(region_idx >= N_PMP_REGIONS)
+ {
+ printm_err("M mode: grant_enclave_access: can not find exact mm_region\r\n");
+ return -1;
+ }
+
+ pmp_idx = REGION_TO_PMP(region_idx);
+#if 0
+ pmp_config.paddr = mm_regions[region_idx].paddr;
+ pmp_config.size = mm_regions[region_idx].size;
+#else
+ //this enclave memory region could be less than the mm_region size
+ pmp_config.paddr = enclave->paddr;
+ pmp_config.size = enclave->size;
+#endif
+ pmp_config.perm = PMP_R | PMP_W | PMP_X;
+ pmp_config.mode = PMP_A_NAPOT;
+
+ /* Note: here we only set the PMP regions in local Hart*/
+ set_pmp(pmp_idx, pmp_config);
+
+ /*FIXME: we should handle the case that the PMP region contains larger region */
+ //NOTE(review): set_pmp takes pmp_config by value, so this comparison can
+ //never differ as written — the warning below appears to be unreachable
+ if (pmp_config.paddr != enclave->paddr || pmp_config.size != enclave->size){
+ printm("[Penglai Monitor@%s] warning, region != enclave mem\n", __func__);
+ printm("[Penglai Monitor@%s] region: paddr(0x%lx) size(0x%lx)\n",
+ __func__, pmp_config.paddr, pmp_config.size);
+ printm("[Penglai Monitor@%s] enclave mem: paddr(0x%lx) size(0x%lx)\n",
+ __func__, enclave->paddr, enclave->size);
+ }
+
+ return 0;
+}
+
+/*
+ * Revoke the local hart's PMP window onto the enclave's memory:
+ * locate the mm_region containing it and clear the matching PMP entry.
+ * Returns 0 on success, -1 if no region contains the enclave memory.
+ */
+int retrieve_enclave_access(struct enclave_t *enclave)
+{
+ int region_idx = 0;
+ int pmp_idx = 0;
+ //struct pmp_config_t pmp_config;
+
+ //set pmp permission, ensure that enclave's paddr and size is pmp legal
+ //TODO: support multiple memory regions
+ spin_lock(&pmp_bitmap_lock);
+ for(region_idx = 0; region_idx < N_PMP_REGIONS; ++region_idx)
+ {
+ if(mm_regions[region_idx].valid && region_contain(
+ mm_regions[region_idx].paddr, mm_regions[region_idx].size,
+ enclave->paddr, enclave->size))
+ {
+ break;
+ }
+ }
+ spin_unlock(&pmp_bitmap_lock);
+
+ if(region_idx >= N_PMP_REGIONS)
+ {
+ printm_err("M mode: Error: %s\r\n", __func__);
+ /* For Debug */
+ for (region_idx = 0; region_idx < N_PMP_REGIONS; ++region_idx) {
+ printm("[Monitor Debug@%s] mm_region[%d], valid(%d), paddr(0x%lx) size(0x%lx)\n",
+ __func__, region_idx, mm_regions[region_idx].valid, mm_regions[region_idx].paddr,
+ mm_regions[region_idx].size);
+ }
+ printm("[Monitor Debug@%s] enclave paddr(0x%lx) size(0x%lx)\n",
+ __func__, enclave->paddr, enclave->size);
+
+ return -1;
+ }
+
+ pmp_idx = REGION_TO_PMP(region_idx);
+
+ // we can simply clear the PMP to retrieve the permission
+ clear_pmp(pmp_idx);
+
+ return 0;
+}
+
+/*
+ * Check that a candidate secure-memory region (paddr, size) overlaps
+ * neither the security monitor itself nor any existing enclave region.
+ * Returns 0 if there is no overlap, -1 otherwise.
+ * Caller must hold pmp_bitmap_lock (see mm_init).
+ */
+int check_mem_overlap(uintptr_t paddr, unsigned long size)
+{
+ unsigned long sm_base = SM_BASE;
+ unsigned long sm_size = SM_SIZE;
+ int region_idx = 0;
+
+ //check whether the new region overlaps with security monitor
+ if(region_overlap(sm_base, sm_size, paddr, size))
+ {
+ printm_err("pmp memory overlaps with security monitor!\r\n");
+ return -1;
+ }
+
+ //check whether the new region overlap with existing enclave region
+ for(region_idx = 0; region_idx < N_PMP_REGIONS; ++region_idx)
+ {
+ if(mm_regions[region_idx].valid
+ && region_overlap(mm_regions[region_idx].paddr, mm_regions[region_idx].size,
+ paddr, size))
+ {
+ printm_err("pmp memory overlaps with existing pmp memory!\r\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Register a new secure-memory region: claim a free PMP slot, protect the
+ * region from the kernel (no permissions), and initialize the buddy-system
+ * free lists. The free-list metadata is stored inside the region itself,
+ * starting at paddr. Returns 0 on success, -1UL on error.
+ */
+uintptr_t mm_init(uintptr_t paddr, unsigned long size)
+{
+ uintptr_t retval = 0;
+ int region_idx = 0;
+ int pmp_idx =0;
+ struct pmp_config_t pmp_config;
+
+ //check align of paddr and size
+ if(check_mem_size(paddr, size) < 0)
+ return -1UL;
+
+ //acquire a free enclave region
+ spin_lock(&pmp_bitmap_lock);
+
+ //check memory overlap
+ //memory overlap should be checked after acquire lock
+ if(check_mem_overlap(paddr, size) < 0)
+ {
+ retval = -1UL;
+ goto out;
+ }
+
+ //alloc a free pmp
+ for(region_idx = 0; region_idx < N_PMP_REGIONS; ++region_idx)
+ {
+ pmp_idx = REGION_TO_PMP(region_idx);
+ if(!(pmp_bitmap & (1<<pmp_idx)))
+ {
+ //FIXME: we already have mm_regions[x].valid, why pmp_bitmap again
+ pmp_bitmap |= (1 << pmp_idx);
+ break;
+ }
+ }
+ if(region_idx >= N_PMP_REGIONS)
+ {
+ retval = -1UL;
+ goto out;
+ }
+
+ //set PMP to protect enclave memory region
+ pmp_config.paddr = paddr;
+ pmp_config.size = size;
+ pmp_config.perm = PMP_NO_PERM;
+ pmp_config.mode = PMP_A_NAPOT;
+ set_pmp_and_sync(pmp_idx, pmp_config);
+
+ //mark this region is valid and init mm_list
+ mm_regions[region_idx].valid = 1;
+ mm_regions[region_idx].paddr = paddr;
+ mm_regions[region_idx].size = size;
+ //the whole region starts as one free block of order ceil(log2(size))
+ struct mm_list_t *mm_list = (struct mm_list_t*)PADDR_2_MM_LIST(paddr);
+ mm_list->order = ilog2(size-1) + 1;
+ mm_list->prev_mm = NULL;
+ mm_list->next_mm = NULL;
+ struct mm_list_head_t *mm_list_head = (struct mm_list_head_t*)paddr;
+ mm_list_head->order = mm_list->order;
+ mm_list_head->prev_list_head = NULL;
+ mm_list_head->next_list_head = NULL;
+ mm_list_head->mm_list = mm_list;
+ mm_regions[region_idx].mm_list_head = mm_list_head;
+
+out:
+ spin_unlock(&pmp_bitmap_lock);
+ return retval;
+}
+
+//NOTE: this function may modify the arg mm_list_head
+//remember to acquire lock before calling this function
+//be sure that mm_region does exist in mm_list and mm_list does exist in mm_lists
+//
+//Unlinks mm_region from its per-order free list. If mm_region was the list
+//head's first block, the list head metadata is rebuilt at the next block's
+//address (heads live inside the free blocks themselves); if it was the only
+//block, the whole list is removed and *mm_list_head is set to NULL.
+static int delete_certain_region(int region_idx, struct mm_list_head_t** mm_list_head, struct mm_list_t *mm_region)
+{
+ struct mm_list_t* prev_mm = mm_region->prev_mm;
+ struct mm_list_t* next_mm = mm_region->next_mm;
+ struct mm_list_head_t* prev_list_head = (*mm_list_head)->prev_list_head;
+ struct mm_list_head_t* next_list_head = (*mm_list_head)->next_list_head;
+
+ //delete mm_region from old mm_list
+ //mm_region is in the middle of the mm_list
+ if(prev_mm)
+ {
+ prev_mm->next_mm = next_mm;
+ if(next_mm)
+ next_mm->prev_mm = prev_mm;
+ }
+ //mm_region is in the first place of old mm_list
+ else if(next_mm)
+ {
+ next_mm->prev_mm = NULL;
+ struct mm_list_head_t* new_list_head = (struct mm_list_head_t*)MM_LIST_2_PADDR(next_mm);
+ new_list_head->order = next_mm->order;
+ new_list_head->prev_list_head = prev_list_head;
+ new_list_head->next_list_head = next_list_head;
+ new_list_head->mm_list = next_mm;
+ if(prev_list_head)
+ prev_list_head->next_list_head = new_list_head;
+ else
+ mm_regions[region_idx].mm_list_head = new_list_head;
+ if(next_list_head)
+ next_list_head->prev_list_head = new_list_head;
+
+ *mm_list_head = new_list_head;
+ }
+ //mm_region is the only region in old mm_list
+ else
+ {
+ if(prev_list_head)
+ prev_list_head->next_list_head = next_list_head;
+ else
+ mm_regions[region_idx].mm_list_head = next_list_head;
+ if(next_list_head)
+ next_list_head->prev_list_head = prev_list_head;
+
+ *mm_list_head = NULL;
+ }
+
+ return 0;
+}
+
+//remember to acquire a lock before calling this function
+//
+//Pick the first free block of at least the given order from the region's
+//ordered free lists and unlink it. Returns NULL if the region is invalid
+//or has no block large enough. The caller splits oversized blocks.
+static struct mm_list_t* alloc_one_region(int region_idx, int order)
+{
+ if(!mm_regions[region_idx].valid || !mm_regions[region_idx].mm_list_head)
+ {
+ printm("M mode: alloc_one_region: m_regions[%d] is invalid/NULL\r\n", region_idx);
+ return NULL;
+ }
+
+ //lists are sorted by ascending order, so walk to the first fit
+ struct mm_list_head_t *mm_list_head = mm_regions[region_idx].mm_list_head;
+ while(mm_list_head && (mm_list_head->order < order))
+ {
+ mm_list_head = mm_list_head->next_list_head;
+ }
+
+ //current region has no enough free space
+ if(!mm_list_head)
+ return NULL;
+
+ //pick a mm region from current mm_list
+ struct mm_list_t *mm_region = mm_list_head->mm_list;
+
+ //delete the mm region from current mm_list
+ delete_certain_region(region_idx, &mm_list_head, mm_region);
+
+ return mm_region;
+}
+
+//remember to acquire lock before calling this function
+//be sure that mm_list_head does exist in mm_lists
+//
+//Insert mm_region into the free lists, repeatedly coalescing it with its
+//buddy (the equal-sized neighbour differing only in bit 'order' of the
+//address) and promoting the merged block to the next order, buddy-system
+//style. Returns 0 on success, -1 on bad arguments.
+static int merge_regions(int region_idx, struct mm_list_head_t* mm_list_head, struct mm_list_t *mm_region)
+{
+ if(region_idx<0 || region_idx>=N_PMP_REGIONS || !mm_list_head || !mm_region)
+ return -1;
+ if(mm_list_head->order != mm_region->order)
+ return -1;
+
+ struct mm_list_head_t* current_list_head = mm_list_head;
+ struct mm_list_t* current_region = mm_region;
+ while(current_list_head)
+ {
+ //buddy test: addresses must be equal ignoring bit 'order'
+ //NOTE(review): 1 << order is an int shift — overflows for
+ //order >= 31; consider 1UL << order on RV64
+ struct mm_list_t* buddy_region = current_list_head->mm_list;
+ unsigned long paddr = (unsigned long)MM_LIST_2_PADDR(current_region);
+ unsigned long buddy_paddr = (unsigned long)MM_LIST_2_PADDR(buddy_region);
+ while(buddy_region)
+ {
+ buddy_paddr = (unsigned long)MM_LIST_2_PADDR(buddy_region);
+ if((paddr | (1 << current_region->order)) == (buddy_paddr | (1 << current_region->order)))
+ break;
+ buddy_region = buddy_region->next_mm;
+ }
+
+ struct mm_list_head_t* new_list_head = (struct mm_list_head_t*)MM_LIST_2_PADDR(current_region);
+ struct mm_list_head_t* prev_list_head = current_list_head->prev_list_head;
+ struct mm_list_head_t* next_list_head = current_list_head->next_list_head;
+ //didn't find buddy region, just insert this region in current mm_list
+ if(!buddy_region)
+ {
+ current_region->prev_mm = NULL;
+ current_region->next_mm = current_list_head->mm_list;
+ current_list_head->mm_list->prev_mm = current_region;
+ new_list_head->order = current_region->order;
+ new_list_head->prev_list_head = prev_list_head;
+ new_list_head->next_list_head = next_list_head;
+ new_list_head->mm_list = current_region;
+
+ if(prev_list_head)
+ prev_list_head->next_list_head = new_list_head;
+ else
+ mm_regions[region_idx].mm_list_head = new_list_head;
+ if(next_list_head)
+ next_list_head->prev_list_head = new_list_head;
+
+ break;
+ }
+
+ //found buddy_region, merge it and current region
+
+ //first delete buddy_region from old mm_list
+ //Note that this function may modify prev_list and next_list
+ //but won't modify their positions relative to new mm_region
+ delete_certain_region(region_idx, &current_list_head, buddy_region);
+
+ //then merge buddy_region with current region
+ //the merged block starts at the lower of the two addresses
+ int order = current_region->order;
+ current_region = paddr < buddy_paddr ? PADDR_2_MM_LIST(paddr) : PADDR_2_MM_LIST(buddy_paddr);
+ current_region->order = order + 1;
+ current_region->prev_mm = NULL;
+ current_region->next_mm = NULL;
+
+ //next mm_list doesn't exist or has a different order, no need to merge
+ if(!next_list_head || next_list_head->order != current_region->order)
+ {
+ //current_list_head may be NULL now after delete buddy region
+ if(current_list_head)
+ prev_list_head = current_list_head;
+ new_list_head = (struct mm_list_head_t*)MM_LIST_2_PADDR(current_region);
+ new_list_head->order = current_region->order;
+ new_list_head->prev_list_head = prev_list_head;
+ new_list_head->next_list_head = next_list_head;
+ new_list_head->mm_list = current_region;
+
+ if(prev_list_head)
+ prev_list_head->next_list_head = new_list_head;
+ else
+ mm_regions[region_idx].mm_list_head = new_list_head;
+ if(next_list_head)
+ next_list_head->prev_list_head = new_list_head;
+
+ break;
+ }
+
+ //continue to merge with next mm_list
+ current_list_head = next_list_head;
+ }
+
+ return 0;
+}
+
+//remember to acquire lock before calling this function
+//
+//Insert a free block into the region's per-order lists, kept sorted by
+//ascending order. With merge==0 the block is simply prepended to its
+//order's list; with merge!=0 it is handed to merge_regions() for buddy
+//coalescing. Returns 0 on success, -1 on bad arguments.
+static int insert_mm_region(int region_idx, struct mm_list_t* mm_region, int merge)
+{
+ if(region_idx<0 || region_idx>=N_PMP_REGIONS || !mm_regions[region_idx].valid || !mm_region)
+ return -1;
+
+ struct mm_list_head_t* mm_list_head = mm_regions[region_idx].mm_list_head;
+ struct mm_list_head_t* prev_list_head = NULL;
+
+ //there is no mm_list in current pmp_region
+ if(!mm_list_head)
+ {
+ mm_list_head = (struct mm_list_head_t*)MM_LIST_2_PADDR(mm_region);
+ mm_list_head->order = mm_region->order;
+ mm_list_head->prev_list_head = NULL;
+ mm_list_head->next_list_head = NULL;
+ mm_list_head->mm_list = mm_region;
+ mm_regions[region_idx].mm_list_head = mm_list_head;
+ return 0;
+ }
+
+ //traversal from front to back
+ while(mm_list_head && mm_list_head->order < mm_region->order)
+ {
+ prev_list_head = mm_list_head;
+ mm_list_head = mm_list_head->next_list_head;
+ }
+
+ //found the exact mm_list
+ int ret_val = 0;
+ struct mm_list_head_t *new_list_head = (struct mm_list_head_t*)MM_LIST_2_PADDR(mm_region);
+ if(mm_list_head && mm_list_head->order == mm_region->order)
+ {
+ if(!merge)
+ {
+ //insert mm_region to the first pos in mm_list
+ mm_region->prev_mm = NULL;
+ mm_region->next_mm = mm_list_head->mm_list;
+ mm_list_head->mm_list->prev_mm = mm_region;
+
+ //set mm_list_head
+ struct mm_list_head_t* next_list_head = mm_list_head->next_list_head;
+ new_list_head->order = mm_region->order;
+ new_list_head->prev_list_head = prev_list_head;
+ new_list_head->next_list_head = next_list_head;
+ new_list_head->mm_list = mm_region;
+ if(prev_list_head)
+ prev_list_head->next_list_head = new_list_head;
+ else
+ mm_regions[region_idx].mm_list_head = new_list_head;
+ if(next_list_head)
+ next_list_head->prev_list_head = new_list_head;
+ }
+ else
+ {
+ //insert with merge
+ ret_val = merge_regions(region_idx, mm_list_head, mm_region);
+ }
+ }
+ //should create a new mm_list for this mm region
+ //note that mm_list_head might be NULL
+ else
+ {
+ new_list_head->order = mm_region->order;
+ new_list_head->prev_list_head = prev_list_head;
+ new_list_head->next_list_head = mm_list_head;
+ new_list_head->mm_list = mm_region;
+ if(prev_list_head)
+ prev_list_head->next_list_head = new_list_head;
+ else
+ mm_regions[region_idx].mm_list_head = new_list_head;
+ if(mm_list_head)
+ mm_list_head->prev_list_head = new_list_head;
+ }
+
+ return ret_val;
+}
+
+//TODO: delete this function
+//
+//Debug helper: dump every free list (and each free block on it) of
+//mm_regions[0]. Only region 0 is printed. Lock acquisition is left to
+//the caller (the commented-out locking reflects that).
+void print_buddy_system()
+{
+ //spinlock_lock(&pmp_bitmap_lock);
+
+ struct mm_list_head_t* mm_list_head = mm_regions[0].mm_list_head;
+ printm("struct mm_list_head_t size is 0x%lx\r\n", sizeof(struct mm_list_head_t));
+ printm("struct mm_list_t size is 0x%lx\r\n", sizeof(struct mm_list_t));
+ while(mm_list_head)
+ {
+ printm("mm_list_head addr is 0x%ln, order is %d\r\n", (long int *)mm_list_head, mm_list_head->order);
+ printm("mm_list_head prev is 0x%ln, next is 0x%ln, mm_list is 0x%ln\r\n",
+ (long int *)mm_list_head->prev_list_head,
+ (long int *)mm_list_head->next_list_head,
+ (long int*)mm_list_head->mm_list);
+ struct mm_list_t *mm_region = mm_list_head->mm_list;
+ while(mm_region)
+ {
+ printm(" mm_region addr is 0x%ln, order is %d\r\n", (long int *)mm_region, mm_region->order);
+ printm(" mm_region prev is 0x%ln, next is 0x%ln\r\n", (long int*)mm_region->prev_mm, (long int*)mm_region->next_mm);
+ mm_region = mm_region->next_mm;
+ }
+ mm_list_head = mm_list_head->next_list_head;
+ }
+
+ //spinlock_unlock(&pmp_bitmap_lock);
+}
+
+/**
+ * \brief Allocate a chunk of secure memory from the buddy system.
+ *
+ * \param req_size requested size in bytes; rounded up to the next power of two
+ * \param resp_size if non-NULL, receives the actual (rounded) size
+ * \return physical address of the zeroed region, or NULL on failure
+ */
+void* mm_alloc(unsigned long req_size, unsigned long *resp_size)
+{
+ void* ret_addr = NULL;
+ if(req_size == 0)
+ return ret_addr;
+
+ //TODO: reduce lock granularity
+ spin_lock(&pmp_bitmap_lock);
+
+ //print_buddy_system();
+
+ unsigned long order = ilog2(req_size-1) + 1;
+ for(int region_idx=0; region_idx < N_PMP_REGIONS; ++region_idx)
+ {
+ struct mm_list_t* mm_region = alloc_one_region(region_idx, order);
+
+ //there is no enough space in current pmp region
+ if(!mm_region)
+ continue;
+
+ while(mm_region->order > order)
+ {
+ //allocated mm region need to be split
+ mm_region->order -= 1;
+ mm_region->prev_mm = NULL;
+ mm_region->next_mm = NULL;
+
+ //bug fix: use an unsigned long shift — "1 << order" is a
+ //32-bit int shift and overflows for order >= 31
+ void* new_mm_region_paddr = MM_LIST_2_PADDR(mm_region) + (1UL << mm_region->order);
+ struct mm_list_t* new_mm_region = PADDR_2_MM_LIST(new_mm_region_paddr);
+ new_mm_region->order = mm_region->order;
+ new_mm_region->prev_mm = NULL;
+ new_mm_region->next_mm = NULL;
+ insert_mm_region(region_idx, new_mm_region, 0);
+ }
+
+ ret_addr = MM_LIST_2_PADDR(mm_region);
+ break;
+ }
+
+ //print_buddy_system();
+
+ spin_unlock(&pmp_bitmap_lock);
+
+ if(ret_addr && resp_size)
+ {
+ *resp_size = 1UL << order;
+ //secure memory is always handed out zeroed
+ sbi_memset(ret_addr, 0, *resp_size);
+ }
+
+ return ret_addr;
+}
+
+/**
+ * \brief Return a chunk of secure memory to the buddy system.
+ *
+ * \param req_paddr physical address previously returned by mm_alloc
+ * \param free_size size of the allocation; rounded up to a power of two
+ * \return 0 on success, -1 on error (bad alignment, unknown region, or
+ * overlap with already-free memory, i.e. a likely double free)
+ */
+int mm_free(void* req_paddr, unsigned long free_size)
+{
+ //check this paddr is 2^power aligned
+ uintptr_t paddr = (uintptr_t)req_paddr;
+ unsigned long order = ilog2(free_size-1) + 1;
+ //bug fix: 1UL — a plain int shift overflows for order >= 31
+ unsigned long size = 1UL << order;
+ if(check_mem_size(paddr, size) < 0)
+ return -1;
+
+ int ret_val = 0;
+ int region_idx = 0;
+ struct mm_list_t* mm_region = PADDR_2_MM_LIST(paddr);
+ mm_region->order = order;
+ mm_region->prev_mm = NULL;
+ mm_region->next_mm = NULL;
+
+ spin_lock(&pmp_bitmap_lock);
+
+ //print_buddy_system();
+
+ //find the pmp region that contains the memory being freed
+ for(region_idx=0; region_idx < N_PMP_REGIONS; ++region_idx)
+ {
+ if(mm_regions[region_idx].valid && region_contain(mm_regions[region_idx].paddr, mm_regions[region_idx].size, paddr, size))
+ {
+ break;
+ }
+ }
+ if(region_idx >= N_PMP_REGIONS)
+ {
+ printm("mm_free: buddy system doesn't contain memory(addr 0x%lx, order %ld)\r\n", paddr, order);
+ ret_val = -1;
+ goto mm_free_out;
+ }
+
+ //check whether this region overlap with existing free mm_lists
+ //(free_region renamed from mm_region to stop shadowing the outer variable)
+ struct mm_list_head_t* mm_list_head = mm_regions[region_idx].mm_list_head;
+ while(mm_list_head)
+ {
+ struct mm_list_t* free_region = mm_list_head->mm_list;
+ while(free_region)
+ {
+ uintptr_t region_paddr = (uintptr_t)MM_LIST_2_PADDR(free_region);
+ unsigned long region_size = 1UL << free_region->order;
+ if(region_overlap(paddr, size, region_paddr, region_size))
+ {
+ printm("mm_free: memory(addr 0x%lx order %ld) overlap with free memory(addr 0x%lx order %d)\r\n", paddr, order, region_paddr, free_region->order);
+ ret_val = -1;
+ break;
+ }
+ free_region = free_region->next_mm;
+ }
+ if(free_region)
+ break;
+
+ mm_list_head = mm_list_head->next_list_head;
+ }
+ //a non-NULL mm_list_head here means the scan above broke on an overlap
+ if(mm_list_head)
+ {
+ goto mm_free_out;
+ }
+
+ //insert with merge
+ ret_val = insert_mm_region(region_idx, mm_region, 1);
+ if(ret_val < 0)
+ {
+ printm("mm_free: failed to insert mm(addr 0x%lx, order %ld)\r\n in mm_regions[%d]\r\n", paddr, order, region_idx);
+ }
+
+ //printm("after mm_free\r\n");
+ //print_buddy_system();
+
+mm_free_out:
+ spin_unlock(&pmp_bitmap_lock);
+ return ret_val;
+}
diff --git a/lib/sbi/sm/platform/pmp/platform.c b/lib/sbi/sm/platform/pmp/platform.c
new file mode 100644
index 0000000..1ad07ff
--- /dev/null
+++ b/lib/sbi/sm/platform/pmp/platform.c
@@ -0,0 +1,34 @@
+#include "enclave_mm.c"
+#include "platform_thread.c"
+
+#include <sm/print.h>
+
+/*
+ * One-time per-hart PMP setup for the PMP-only platform:
+ * PMP0 hides the security monitor, PMP1 is reserved (cleared) for
+ * temporary kernel access to enclave memory, and the last PMP opens the
+ * whole address space to the kernel. Returns 0.
+ */
+int platform_init()
+{
+ struct pmp_config_t pmp_config;
+
+ //Clear pmp1, this pmp is reserved for allowing kernel
+ //to config page table for enclave in enclave's memory.
+ //There is no need to broadcast to other hart as every
+ //hart will execute this function.
+ //clear_pmp(1);
+ clear_pmp_and_sync(1);
+
+ //config the PMP 0 to protect security monitor
+ pmp_config.paddr = (uintptr_t)SM_BASE;
+ pmp_config.size = (unsigned long)SM_SIZE;
+ pmp_config.mode = PMP_A_NAPOT;
+ pmp_config.perm = PMP_NO_PERM;
+ set_pmp_and_sync(0, pmp_config);
+
+ //config the last PMP to allow kernel to access memory
+ //paddr 0 + size -1UL encodes the full address space (see set_pmp)
+ pmp_config.paddr = 0;
+ pmp_config.size = -1UL;
+ pmp_config.mode = PMP_A_NAPOT;
+ pmp_config.perm = PMP_R | PMP_W | PMP_X;
+ //set_pmp(NPMP-1, pmp_config);
+ set_pmp_and_sync(NPMP-1, pmp_config);
+
+ printm("[Penglai Monitor@%s] setting initial PMP ready\n", __func__);
+ return 0;
+}
diff --git a/lib/sbi/sm/platform/pmp/platform_thread.c b/lib/sbi/sm/platform/pmp/platform_thread.c
new file mode 100644
index 0000000..8aa9df6
--- /dev/null
+++ b/lib/sbi/sm/platform/pmp/platform_thread.c
@@ -0,0 +1,31 @@
+//PMP platform hook: nothing to do on enclave entry (PMP switching is
+//handled elsewhere); kept so the generic SM code has a uniform interface.
+void platform_enter_enclave_world()
+{
+ return;
+}
+
+//PMP platform hook: nothing to do on enclave exit.
+void platform_exit_enclave_world()
+{
+ return;
+}
+
+//PMP platform hook: no extra per-platform check; always reports success (0).
+int platform_check_in_enclave_world()
+{
+ return 0;
+}
+
+//Verify the given enclave is the one running on this hart by comparing its
+//saved page-table base against the live SATP. Returns 0 on match, -1 otherwise.
+int platform_check_enclave_authentication(struct enclave_t* enclave)
+{
+ if(enclave->thread_context.encl_ptbr != csr_read(CSR_SATP))
+ return -1;
+ return 0;
+}
+
+//Install the enclave's page-table base into SATP ('thread' unused here).
+void platform_switch_to_enclave_ptbr(struct thread_state_t* thread, uintptr_t enclave_ptbr)
+{
+ csr_write(CSR_SATP, enclave_ptbr);
+}
+
+//Restore the host's page-table base into SATP ('thread' unused here).
+void platform_switch_to_host_ptbr(struct thread_state_t* thread, uintptr_t host_ptbr)
+{
+ csr_write(CSR_SATP, host_ptbr);
+}
diff --git a/lib/sbi/sm/pmp.c b/lib/sbi/sm/pmp.c
new file mode 100644
index 0000000..550a758
--- /dev/null
+++ b/lib/sbi/sm/pmp.c
@@ -0,0 +1,287 @@
+#include <sm/pmp.h>
+#include <stddef.h>
+#include <sbi/sbi_pmp.h>
+#include <sbi/sbi_console.h>
+#include <sm/sm.h>
+
+/**
+ * \brief Set pmp on the local hart, then broadcast the same config to all
+ * other harts via an IPI (sbi_send_pmp).
+ *
+ * \param pmp_idx_arg The pmp index.
+ * \param pmp_config_arg The pmp config.
+ */
+void set_pmp_and_sync(int pmp_idx_arg, struct pmp_config_t pmp_config_arg)
+{
+ struct pmp_data_t pmp_data;
+ u32 source_hart = current_hartid();
+
+ //set current hart's pmp
+ set_pmp(pmp_idx_arg, pmp_config_arg);
+ //sync all other harts
+ //NOTE(review): the 32-bit mask assumes at most 32 harts — confirm
+ SBI_PMP_DATA_INIT(&pmp_data, pmp_config_arg, pmp_idx_arg, source_hart);
+ sbi_send_pmp(0xFFFFFFFF&(~(1<<source_hart)), 0, &pmp_data);
+ return;
+}
+
+/**
+ * \brief Clear pmp and sync all harts (implemented by pushing a PMP_OFF
+ * config through set_pmp_and_sync).
+ *
+ * \param pmp_idx The pmp index.
+ */
+void clear_pmp_and_sync(int pmp_idx)
+{
+ struct pmp_config_t pmp_config = {0,};
+
+ pmp_config.mode = PMP_OFF;
+ set_pmp_and_sync(pmp_idx, pmp_config);
+
+ return;
+}
+
+//TODO Only handle for the __riscv_64
+//
+//Write the raw pmpaddr/pmpcfg CSRs for entry pmp_idx. The switch is needed
+//because CSR numbers must be compile-time constants. The second PMP_SET
+//argument selects the pmpcfg CSR: entries 0-7 live in pmpcfg0 and entries
+//8-15 in pmpcfg2 on RV64. The (possibly macro-adjusted) values are written
+//back through the pointer arguments.
+void set_pmp_reg(int pmp_idx, uintptr_t* pmp_address, uintptr_t* pmp_config)
+{
+ uintptr_t tmp_pmp_address, tmp_pmp_config;
+ tmp_pmp_address = *pmp_address;
+ tmp_pmp_config = *pmp_config;
+ switch(pmp_idx)
+ {
+ case 0:
+ PMP_SET(0, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 1:
+ PMP_SET(1, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 2:
+ PMP_SET(2, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 3:
+ PMP_SET(3, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 4:
+ PMP_SET(4, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 5:
+ PMP_SET(5, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 6:
+ PMP_SET(6, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 7:
+ PMP_SET(7, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 8:
+ PMP_SET(8, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 9:
+ PMP_SET(9, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 10:
+ PMP_SET(10, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 11:
+ PMP_SET(11, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 12:
+ PMP_SET(12, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 13:
+ PMP_SET(13, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 14:
+ PMP_SET(14, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 15:
+ PMP_SET(15, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ default:
+ //out-of-range index: silently ignored
+ break;
+ }
+ *pmp_address = tmp_pmp_address;
+ *pmp_config = tmp_pmp_config;
+}
+
+/**
+ * \brief Read the raw pmpaddr/pmpcfg CSR values for entry pmp_idx into
+ * *pmp_address / *pmp_config. Mirrors set_pmp_reg: the switch exists
+ * because CSR numbers must be compile-time constants, and the second
+ * PMP_READ argument selects pmpcfg0 (entries 0-7) or pmpcfg2 (8-15).
+ * An out-of-range index yields zeros.
+ */
+void get_pmp_reg(int pmp_idx, uintptr_t* pmp_address, uintptr_t* pmp_config)
+{
+ uintptr_t tmp_pmp_address=0, tmp_pmp_config=0;
+ switch(pmp_idx)
+ {
+ case 0:
+ PMP_READ(0, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 1:
+ PMP_READ(1, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 2:
+ PMP_READ(2, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 3:
+ PMP_READ(3, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 4:
+ PMP_READ(4, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 5:
+ PMP_READ(5, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 6:
+ PMP_READ(6, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 7:
+ PMP_READ(7, 0, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 8:
+ PMP_READ(8, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 9:
+ PMP_READ(9, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 10:
+ PMP_READ(10, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 11:
+ PMP_READ(11, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 12:
+ PMP_READ(12, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 13:
+ PMP_READ(13, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 14:
+ PMP_READ(14, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ case 15:
+ PMP_READ(15, 2, tmp_pmp_address, tmp_pmp_config);
+ break;
+ default:
+ break;
+ }
+ *pmp_address = tmp_pmp_address;
+ *pmp_config = tmp_pmp_config;
+}
+
+/**
+ * \brief set current hart's pmp: encode the (mode, perm, paddr, size)
+ * config into raw pmpaddr/pmpcfg values and write them via set_pmp_reg.
+ *
+ * \param pmp_idx the index of target PMP register
+ * \param pmp_cfg_t the configuration of the PMP register
+ */
+void set_pmp(int pmp_idx, struct pmp_config_t pmp_cfg_t)
+{
+ uintptr_t pmp_address = 0;
+ //uintptr_t old_config = 0;
+#define PMP_CONFIG_OFFSET(pmp_idx) ((uintptr_t)PMPCFG_BIT_NUM * (pmp_idx % PMP_PER_CFG_REG))
+ uintptr_t pmp_config = ((pmp_cfg_t.mode & PMP_A) | (pmp_cfg_t.perm & (PMP_R|PMP_W|PMP_X)))
+ << PMP_CONFIG_OFFSET(pmp_idx);
+
+ switch(pmp_cfg_t.mode)
+ {
+ case PMP_A_NAPOT:
+ //paddr 0 + size -1UL is the special all-ones encoding for the
+ //entire address space
+ if(pmp_cfg_t.paddr == 0 && pmp_cfg_t.size == -1UL)
+ pmp_address = -1UL;
+ else
+ pmp_address = (pmp_cfg_t.paddr | ((pmp_cfg_t.size>>1)-1)) >> 2;
+ break;
+ case PMP_A_TOR:
+ pmp_address = pmp_cfg_t.paddr;
+ break;
+ case PMP_A_NA4:
+ pmp_address = pmp_cfg_t.paddr;
+ //bug fix: a missing break here used to fall through into PMP_OFF
+ //and zero the address, breaking NA4 regions
+ break;
+ case PMP_OFF:
+ pmp_address = 0;
+ break;
+ default:
+ pmp_address = 0;
+ break;
+ }
+ set_pmp_reg(pmp_idx, &pmp_address, &pmp_config);
+
+ return;
+}
+
+/**
+ * \brief clear the configuration of a PMP register on the local hart only
+ * (writes a PMP_OFF config with no permissions; no cross-hart sync —
+ * use clear_pmp_and_sync for that)
+ *
+ * \param pmp_idx the index of target PMP register
+ */
+void clear_pmp(int pmp_idx)
+{
+ struct pmp_config_t pmp_cfg_t;
+
+ pmp_cfg_t.mode = PMP_OFF;
+ pmp_cfg_t.perm = PMP_NO_PERM;
+ pmp_cfg_t.paddr = 0;
+ pmp_cfg_t.size = 0;
+ set_pmp(pmp_idx, pmp_cfg_t);
+
+ return;
+}
+
+/**
+ * \brief Get the configuration of a pmp register (pmp_idx): read the raw
+ * CSR values and decode them back into a struct pmp_config_t
+ * (inverse of set_pmp; TOR size is not decoded).
+ *
+ * \param pmp_idx the index of target PMP register
+ */
+struct pmp_config_t get_pmp(int pmp_idx)
+{
+ struct pmp_config_t pmp = {0,};
+ uintptr_t pmp_address = 0;
+ uintptr_t pmp_config = 0;
+ unsigned long order = 0;
+ unsigned long size = 0;
+
+ //set_pmp_reg(pmp_idx, &pmp_address, &pmp_config);
+ get_pmp_reg(pmp_idx, &pmp_address, &pmp_config);
+
+ //isolate this entry's byte of the shared pmpcfg CSR
+ pmp_config >>= (uintptr_t)PMPCFG_BIT_NUM * (pmp_idx % PMP_PER_CFG_REG);
+ pmp_config &= PMPCFG_BITS;
+ switch(pmp_config & PMP_A)
+ {
+ case PMP_A_NAPOT:
+ //the count of trailing 1s plus 3 gives log2(region size)
+ while(pmp_address & 1)
+ {
+ order += 1;
+ pmp_address >>= 1;
+ }
+ order += 3;
+ //bug fix: an all-ones pmpaddr (whole address space, see set_pmp)
+ //previously drove order past the word size, making the shifts
+ //below undefined behavior; decode it back to the special config
+ if(order >= 8*sizeof(uintptr_t))
+ {
+ pmp_address = 0;
+ size = -1UL;
+ }
+ else
+ {
+ //bug fix: 1UL — "1 << order" is an int shift and overflows
+ //for order >= 31
+ size = 1UL << order;
+ pmp_address <<= (order-1);
+ }
+ break;
+ case PMP_A_NA4:
+ size = 4;
+ break;
+ case PMP_A_TOR:
+ break;
+ case PMP_OFF:
+ pmp_address = 0;
+ size = 0;
+ break;
+ }
+
+ pmp.mode = pmp_config & PMP_A;
+ pmp.perm = pmp_config & (PMP_R | PMP_W | PMP_X);
+ pmp.paddr = pmp_address;
+ pmp.size = size;
+
+ return pmp;
+}
+
+/**
+ * \brief Dump PMP registers, only used for debug
+ */
+void dump_pmps(void)
+{
+ /*FIXME: we can have different number of PMP regions */
+ int i;
+ for (i=0; i<16; i++){
+ struct pmp_config_t pmp = get_pmp(i);
+ (void)pmp; //to ignore the unused variable warnings
+ printm("[Debug:SM@%s] pmp_%d: mode(0x%lx) perm(0x%lx) paddr(0x%lx) size(0x%lx)\n",
+ __func__, i, pmp.mode, pmp.perm, pmp.paddr, pmp.size);
+ }
+}
diff --git a/lib/sbi/sm/sm.ac b/lib/sbi/sm/sm.ac
new file mode 100644
index 0000000..0479971
--- /dev/null
+++ b/lib/sbi/sm/sm.ac
@@ -0,0 +1,3 @@
+AC_ARG_WITH([target_platform], AS_HELP_STRING([--with-target-platform], [Set a specific platform for the sm to build with]),
+ [AC_SUBST([TARGET_PLATFORM], $with_target_platform, [Set a specific platform for the sm to build with])],
+ [AC_SUBST([TARGET_PLATFORM], pmp, [Set a specific platform for the sm to build with])])
diff --git a/lib/sbi/sm/sm.c b/lib/sbi/sm/sm.c
new file mode 100644
index 0000000..03bf677
--- /dev/null
+++ b/lib/sbi/sm/sm.c
@@ -0,0 +1,204 @@
+//#include <sm/atomic.h>
+#include <sbi/riscv_atomic.h>
+#include <sm/sm.h>
+#include <sm/pmp.h>
+#include <sm/enclave.h>
+#include <sm/math.h>
+#include <sbi/sbi_console.h>
+
+//static int sm_initialized = 0;
+//static spinlock_t sm_init_lock = SPINLOCK_INIT;
+
+/**
+ * \brief Initialize the security monitor.
+ *
+ * Delegates all setup to the platform-specific platform_init().
+ */
+void sm_init()
+{
+ platform_init();
+}
+
+/**
+ * \brief Hand the physical region [paddr, paddr+size) to the monitor's
+ * secure-memory allocator via mm_init().
+ *
+ * \param paddr start physical address of the region
+ * \param size region size in bytes
+ * \return whatever mm_init() returns (presumably 0 on success -- TODO confirm)
+ */
+uintptr_t sm_mm_init(uintptr_t paddr, unsigned long size)
+{
+ uintptr_t retval = 0;
+
+ printm("[Penglai Monitor] %s invoked\r\n",__func__);
+
+ printm("[Penglai Monitor] %s paddr:0x%lx, size:0x%lx\r\n",__func__, paddr, size);
+ /*DEBUG: Dump PMP registers here */
+ dump_pmps();
+ retval = mm_init(paddr, size);
+ /*DEBUG: Dump PMP registers here */
+ dump_pmps();
+
+ printm("[Penglai Monitor] %s ret:%ld \r\n",__func__, retval);
+ return retval;
+}
+
+/**
+ * \brief Extend the monitor's secure memory with another region.
+ *
+ * Same underlying call as sm_mm_init() (mm_init), without the PMP dumps.
+ *
+ * \param paddr start physical address of the additional region
+ * \param size region size in bytes
+ * \return whatever mm_init() returns
+ */
+uintptr_t sm_mm_extend(uintptr_t paddr, unsigned long size)
+{
+ uintptr_t retval = 0;
+ printm("[Penglai Monitor] %s invoked\r\n",__func__);
+
+ retval = mm_init(paddr, size);
+
+ printm("[Penglai Monitor] %s return:%ld\r\n",__func__, retval);
+ return retval;
+}
+
+/**
+ * \brief Debug helper: dump the buddy-allocator state.
+ *
+ * \param regs unused
+ * \param arg0 unused
+ * \return always 0
+ */
+uintptr_t sm_debug_print(uintptr_t* regs, uintptr_t arg0)
+{
+ print_buddy_system();
+ return 0;
+}
+
+/**
+ * \brief Allocate secure memory on behalf of the host kernel.
+ *
+ * Copies the request from host memory, allocates from the monitor's
+ * allocator, grants the kernel access to the result, and copies the
+ * resulting address/size back to the host.
+ *
+ * \param mm_alloc_arg host pointer to a struct mm_alloc_arg_t request
+ * \return ENCLAVE_SUCCESS, ENCLAVE_NO_MEMORY, or ENCLAVE_ERROR
+ */
+uintptr_t sm_alloc_enclave_mem(uintptr_t mm_alloc_arg)
+{
+ struct mm_alloc_arg_t mm_alloc_arg_local;
+ uintptr_t retval = 0;
+
+ printm("[Penglai Monitor] %s invoked\r\n",__func__);
+
+ //snapshot the request into monitor memory before trusting its fields
+ retval = copy_from_host(&mm_alloc_arg_local,
+ (struct mm_alloc_arg_t*)mm_alloc_arg,
+ sizeof(struct mm_alloc_arg_t));
+ if(retval != 0)
+ {
+ printm_err("M mode: sm_alloc_enclave_mem: unknown error happended when copy from host\r\n");
+ return ENCLAVE_ERROR;
+ }
+
+ dump_pmps();
+ unsigned long resp_size = 0;
+ void* paddr = mm_alloc(mm_alloc_arg_local.req_size, &resp_size);
+ if(paddr == NULL)
+ {
+ printm("M mode: sm_alloc_enclave_mem: no enough memory\r\n");
+ return ENCLAVE_NO_MEMORY;
+ }
+ dump_pmps();
+
+ //grant kernel access to this memory
+ if(grant_kernel_access(paddr, resp_size) != 0)
+ {
+ //return the block to the allocator so it is not leaked
+ printm_err("M mode: ERROR: faile to grant kernel access to pa 0x%lx, size 0x%lx\r\n", (unsigned long) paddr, resp_size);
+ mm_free(paddr, resp_size);
+ return ENCLAVE_ERROR;
+ }
+
+ mm_alloc_arg_local.resp_addr = (uintptr_t)paddr;
+ mm_alloc_arg_local.resp_size = resp_size;
+
+ //publish the allocation result back to the host's argument struct
+ copy_to_host((struct mm_alloc_arg_t*)mm_alloc_arg,
+ &mm_alloc_arg_local,
+ sizeof(struct mm_alloc_arg_t));
+
+ //NOTE(review): retval is 0 here, while the function returns
+ //ENCLAVE_SUCCESS -- this log line does not show the real return value
+ printm("[Penglai Monitor] %s return:%ld\r\n",__func__, retval);
+
+ return ENCLAVE_SUCCESS;
+}
+
+/**
+ * \brief Create an enclave from a host-provided parameter struct.
+ *
+ * Copies the parameters into monitor memory, revokes the kernel's access
+ * to the enclave memory, then calls create_enclave().
+ *
+ * \param enclave_sbi_param host pointer to a struct enclave_sbi_param_t
+ * \return create_enclave()'s result, ENCLAVE_ERROR on a failed host copy,
+ * or -1UL when kernel access cannot be retrieved
+ */
+uintptr_t sm_create_enclave(uintptr_t enclave_sbi_param)
+{
+ struct enclave_sbi_param_t enclave_sbi_param_local;
+ uintptr_t retval = 0;
+
+ printm("[Penglai Monitor] %s invoked\r\n",__func__);
+
+ retval = copy_from_host(&enclave_sbi_param_local,
+ (struct enclave_sbi_param_t*)enclave_sbi_param,
+ sizeof(struct enclave_sbi_param_t));
+ //bail out before using the (otherwise uninitialized) local copy;
+ //mirrors the error handling in sm_alloc_enclave_mem()
+ if(retval != 0)
+ {
+ printm_err("M mode: sm_create_enclave: unknown error happended when copy from host\r\n");
+ return ENCLAVE_ERROR;
+ }
+
+ void* paddr = (void*)enclave_sbi_param_local.paddr;
+ unsigned long size = (unsigned long)enclave_sbi_param_local.size;
+ //the enclave region must no longer be host-accessible; on failure,
+ //give the memory back to the allocator
+ if(retrieve_kernel_access(paddr, size) != 0)
+ {
+ mm_free(paddr, size);
+ return -1UL;
+ }
+
+ retval = create_enclave(enclave_sbi_param_local);
+
+ printm("[Penglai Monitor] %s created return value:%ld \r\n",__func__, retval);
+ return retval;
+}
+
+/**
+ * \brief Transitional wrapper: log and forward to run_enclave().
+ *
+ * \param regs the host register context
+ * \param eid the enclave id (truncated to unsigned int for run_enclave)
+ * \return run_enclave()'s result
+ */
+uintptr_t sm_run_enclave(uintptr_t* regs, unsigned long eid)
+{
+ uintptr_t retval;
+ printm("[Penglai Monitor] %s invoked, eid:%ld\r\n",__func__, eid);
+
+ retval = run_enclave(regs, (unsigned int)eid);
+
+ printm("[Penglai Monitor] %s return: %ld\r\n",__func__, retval);
+
+ return retval;
+}
+
+/**
+ * \brief Transitional wrapper: log and forward to stop_enclave().
+ *
+ * \param regs the host register context
+ * \param eid the enclave id (truncated to unsigned int for stop_enclave)
+ * \return stop_enclave()'s result
+ */
+uintptr_t sm_stop_enclave(uintptr_t* regs, unsigned long eid)
+{
+ uintptr_t retval;
+ printm("[Penglai Monitor] %s invoked, eid:%ld\r\n",__func__, eid);
+
+ retval = stop_enclave(regs, (unsigned int)eid);
+
+ printm("[Penglai Monitor] %s return: %ld\r\n",__func__, retval);
+ return retval;
+}
+
+/**
+ * \brief Resume a previously interrupted/stopped enclave.
+ *
+ * Dispatches on the resume reason passed by the caller in regs[11]
+ * (a1): timer-IRQ resume vs. resume-from-stop.
+ *
+ * \param regs the host register context (regs[11] holds the reason)
+ * \param eid the enclave id
+ * \return the dispatched handler's result; 0 for an unknown reason
+ */
+uintptr_t sm_resume_enclave(uintptr_t* regs, unsigned long eid)
+{
+ uintptr_t retval = 0;
+ uintptr_t resume_func_id = regs[11];
+
+ switch(resume_func_id)
+ {
+ case RESUME_FROM_TIMER_IRQ:
+ retval = resume_enclave(regs, eid);
+ break;
+ case RESUME_FROM_STOP:
+ retval = resume_from_stop(regs, eid);
+ break;
+ default:
+ //unknown reason: silently return 0
+ break;
+ }
+
+ return retval;
+}
+
+/**
+ * \brief Transitional wrapper: log and forward to exit_enclave().
+ *
+ * \param regs the enclave register context
+ * \param retval the enclave's exit value, passed through to exit_enclave()
+ * \return exit_enclave()'s result
+ */
+uintptr_t sm_exit_enclave(uintptr_t* regs, unsigned long retval)
+{
+ uintptr_t ret;
+ printm("[Penglai Monitor] %s invoked\r\n",__func__);
+
+ ret = exit_enclave(regs, retval);
+
+ printm("[Penglai Monitor] %s return: %ld\r\n",__func__, ret);
+
+ return ret;
+}
+
+/**
+ * \brief This transitional function is used to destroy the enclave.
+ *
+ * \param regs The host reg.
+ * \param enclave_id The enclave id.
+ * \return destroy_enclave()'s result
+ */
+uintptr_t sm_destroy_enclave(uintptr_t *regs, uintptr_t enclave_id)
+{
+ uintptr_t ret = 0;
+ printm("[Penglai Monitor] %s invoked\r\n",__func__);
+
+ ret = destroy_enclave(regs, enclave_id);
+
+ printm("[Penglai Monitor] %s return: %ld\r\n",__func__, ret);
+
+ return ret;
+}
+
+/**
+ * \brief Handle a machine timer interrupt and report the result to the
+ * caller through the a0/a1 register slots.
+ *
+ * \param regs the trapped register context (regs[10]=a0, regs[11]=a1)
+ * \param mcause the trap cause CSR value
+ * \param mepc the trap pc CSR value
+ * \return do_timer_irq()'s result (also stored into regs[11])
+ */
+uintptr_t sm_do_timer_irq(uintptr_t *regs, uintptr_t mcause, uintptr_t mepc)
+{
+ uintptr_t ret;
+
+ ret = do_timer_irq(regs, mcause, mepc);
+
+ regs[10] = 0; //no errors in all cases for timer handler
+ regs[11] = ret; //value
+ return ret;
+}
diff --git a/lib/sbi/sm/sm.mk.in b/lib/sbi/sm/sm.mk.in
new file mode 100644
index 0000000..649d773
--- /dev/null
+++ b/lib/sbi/sm/sm.mk.in
@@ -0,0 +1,25 @@
+sm_hdrs = \
+ pmp.h \
+ sm.h \
+ enclave_args.h \
+ enclave.h \
+ platform/@TARGET_PLATFORM@/platform.h \
+ thread.h \
+ math.h
+
+sm_c_srcs = \
+ ipi.c \
+ pmp.c \
+ platform/@TARGET_PLATFORM@/platform.c \
+ sm.c \
+ enclave.c \
+ thread.c \
+ math.c
+
+sm_asm_srcs = \
+
+
+sm_test_srcs =
+
+
+sm_install_prog_srcs =
diff --git a/lib/sbi/sm/thread.c b/lib/sbi/sm/thread.c
new file mode 100644
index 0000000..2ecc419
--- /dev/null
+++ b/lib/sbi/sm/thread.c
@@ -0,0 +1,67 @@
+#include <sm/thread.h>
+//#include <sm/mtrap.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/riscv_asm.h>
+
+/**
+ * \brief Exchange the saved general-purpose register file in
+ * thread->prev_state with the live trap frame in regs.
+ *
+ * \param thread the thread whose saved state is swapped in/out
+ * \param regs the current trap register array
+ */
+void swap_prev_state(struct thread_state_t* thread, uintptr_t* regs)
+{
+ int i;
+
+ uintptr_t* prev = (uintptr_t*) &thread->prev_state;
+ //starts at 1: slot 0 is skipped (presumably the hardwired-zero
+ //register x0 -- TODO confirm against thread_state_t's layout)
+ for(i = 1; i < N_GENERAL_REGISTERS; ++i)
+ {
+ /* swap general registers */
+ uintptr_t tmp = prev[i];
+ prev[i] = regs[i];
+ regs[i] = tmp;
+ }
+
+ return;
+}
+
+/**
+ * \brief Swap the saved mepc with the current one: the previous value is
+ * written into the CSR, and current_mepc is remembered for the way back.
+ */
+void swap_prev_mepc(struct thread_state_t* thread, uintptr_t current_mepc)
+{
+ uintptr_t tmp = thread->prev_mepc;
+ thread->prev_mepc = current_mepc;
+ csr_write(CSR_MEPC, tmp);
+}
+
+/**
+ * \brief Swap the saved stvec with the current one: the previous value is
+ * written into the CSR, and current_stvec is remembered for the way back.
+ */
+void swap_prev_stvec(struct thread_state_t* thread, uintptr_t current_stvec)
+{
+ uintptr_t tmp = thread->prev_stvec;
+ thread->prev_stvec = current_stvec;
+ csr_write(CSR_STVEC, tmp);
+}
+
+/*
+ * Cache line binding is only workable
+ * when the hardware supports penglai's on-demand cacheline locking
+ * */
+//intentionally a no-op today: the real swap is compiled out below
+void swap_prev_cache_binding(struct thread_state_t* thread, uintptr_t current_cache_binding)
+{
+#if 0
+ uintptr_t tmp = thread->prev_cache_binding;
+ thread->prev_cache_binding = current_cache_binding;
+#endif
+}
+
+/**
+ * \brief Swap the saved mie with the current one: the previous value is
+ * written into the CSR, and current_mie is remembered for the way back.
+ */
+void swap_prev_mie(struct thread_state_t* thread, uintptr_t current_mie)
+{
+ uintptr_t tmp = thread->prev_mie;
+ thread->prev_mie = current_mie;
+ csr_write(CSR_MIE, tmp);
+}
+
+/**
+ * \brief Swap the saved mideleg with the current one: the previous value
+ * is written into the CSR, and current_mideleg is remembered.
+ */
+void swap_prev_mideleg(struct thread_state_t* thread, uintptr_t current_mideleg)
+{
+ uintptr_t tmp = thread->prev_mideleg;
+ thread->prev_mideleg = current_mideleg;
+ csr_write(CSR_MIDELEG, tmp);
+}
+
+/**
+ * \brief Swap the saved medeleg with the current one: the previous value
+ * is written into the CSR, and current_medeleg is remembered.
+ */
+void swap_prev_medeleg(struct thread_state_t* thread, uintptr_t current_medeleg)
+{
+ uintptr_t tmp = thread->prev_medeleg;
+ thread->prev_medeleg = current_medeleg;
+ csr_write(CSR_MEDELEG, tmp);
+}
diff --git a/lib/sbi/sm/utils.c b/lib/sbi/sm/utils.c
new file mode 100644
index 0000000..091209b
--- /dev/null
+++ b/lib/sbi/sm/utils.c
@@ -0,0 +1,40 @@
+/*
+ * Author: Dong Du
+ * */
+#include <sbi/riscv_encoding.h>
+#include <sbi/sbi_string.h>
+#include <sbi/riscv_locks.h>
+#include <sbi/sbi_console.h>
+#include <sm/utils.h>
+#include <sm/sm.h>
+
+/*
+ * Go through and dump a page table, used for debug
+ *
+ * Recursively prints every valid entry of a (sv39-only) page table:
+ * entry index, raw PTE, and its R/W/X permission bits. Non-leaf
+ * entries are followed one level down, stopping at level 3.
+ *
+ * page_table: physical/virtual pointer to a 512-entry table (may be NULL)
+ * level: current depth, used for indentation and the recursion cutoff
+ * */
+void dump_pt(unsigned long *page_table, int level)
+{
+ int l1, i;
+ unsigned long* l1_pt = page_table;
+
+ if (!l1_pt)
+ return;
+
+ //only consider sv39 now
+ for (l1=0; l1<512; l1++){
+ if (!(l1_pt[l1] & PTE_V)) //this entry is not valid
+ continue;
+
+ for (i=0; i<level; i++) printm("\t"); //space before entries
+ printm("%d: 0x%lx, perm: 0x%lx\n",l1, l1_pt[l1], l1_pt[l1] & (PTE_R | PTE_W | PTE_X));
+ if (!PTE_TABLE(l1_pt[l1])) // not page table page
+ continue;
+
+ if (level == 3) // the last level
+ continue;
+
+ //goto the next level: convert the PTE's PPN back to an address
+ dump_pt((unsigned long*) ((l1_pt[l1]>>PTE_PPN_SHIFT)<<RISCV_PGSHIFT), level+1);
+ }
+
+ return;
+}
--
2.31.1
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
1
https://gitee.com/dongduResearcher/opensbi.git
git@gitee.com:dongduResearcher/opensbi.git
dongduResearcher
opensbi
opensbi
master

搜索帮助

D67c1975 1850385 1daf7b77 1850385