/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Contact Information:
* Jarkko Sakkinen <[email protected]>
* Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
*
* BSD LICENSE
*
* Copyright(c) 2016 Intel Corporation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors:
*
* Jarkko Sakkinen <[email protected]>
* Suresh Siddha <[email protected]>
* Serge Ayoun <[email protected]>
* Shay Katz-zamir <[email protected]>
* Sean Christopherson <[email protected]>
*/
#include "sgx.h"
#include <asm/mman.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/ratelimit.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
#include <linux/sched/signal.h>
#else
#include <linux/signal.h>
#endif
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/shmem_fs.h>
/**
* struct sgx_add_page_req - add-page request for sgx_add_page_worker
*
* @encl: corresponding enclave
 * @encl_page: enclave page initialized with ELRANGE address, EPC page, VA slot, etc.
* @secinfo: SECINFO data (RWX/page type flags)
* @mrmask: bitmask for the 256 byte chunks that are to be measured
* @list: item in list of add-page requests
*/
struct sgx_add_page_req {
struct sgx_encl *encl;
struct sgx_encl_page *encl_page;
struct sgx_secinfo secinfo;
u16 mrmask;
struct list_head list;
};
/* minimum allowed ISV SVN (Security Version Number) for a Launch Enclave */
static u16 sgx_isvsvnle_min;
/* # processes (with at least one enclave) serviced by driver */
atomic_t sgx_nr_pids = ATOMIC_INIT(0);
/* search through sgx_tgid_ctx_list and find TGID context for tgid PID */
static struct sgx_tgid_ctx *sgx_find_tgid_ctx(struct pid *tgid)
{
struct sgx_tgid_ctx *ctx;
list_for_each_entry(ctx, &sgx_tgid_ctx_list, list)
if (pid_nr(ctx->tgid) == pid_nr(tgid))
return ctx;
return NULL;
}
/* associate enclave with host app TGID context (create new if needed) */
static int sgx_add_to_tgid_ctx(struct sgx_encl *encl)
{
struct sgx_tgid_ctx *ctx;
struct pid *tgid = get_pid(task_tgid(current)); /* PID of host app */
mutex_lock(&sgx_tgid_ctx_mutex);
/* first try to find TGID context for host app */
ctx = sgx_find_tgid_ctx(tgid);
if (ctx) {
if (kref_get_unless_zero(&ctx->refcount)) {
/* associate enclave with found TGID context */
encl->tgid_ctx = ctx;
mutex_unlock(&sgx_tgid_ctx_mutex);
put_pid(tgid);
return 0;
}
else
list_del_init(&ctx->list);
}
	/* no TGID context yet (host app is creating its first enclave), create one */
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
mutex_unlock(&sgx_tgid_ctx_mutex);
put_pid(tgid);
return -ENOMEM;
}
ctx->tgid = tgid;
kref_init(&ctx->refcount);
INIT_LIST_HEAD(&ctx->encl_list);
list_add(&ctx->list, &sgx_tgid_ctx_list);
atomic_inc(&sgx_nr_pids);
encl->tgid_ctx = ctx;
mutex_unlock(&sgx_tgid_ctx_mutex);
return 0;
}
/* remove an unused TGID context; called when its last reference is dropped, e.g. via sgx_encl_release() */
void sgx_tgid_ctx_release(struct kref *ref)
{
struct sgx_tgid_ctx *pe =
container_of(ref, struct sgx_tgid_ctx, refcount);
mutex_lock(&sgx_tgid_ctx_mutex);
list_del(&pe->list);
atomic_dec(&sgx_nr_pids);
mutex_unlock(&sgx_tgid_ctx_mutex);
put_pid(pe->tgid);
kfree(pe);
}
/* find the enclave containing addr, take a reference on it (kref_get), and return it in encl */
static int sgx_find_and_get_encl(unsigned long addr, struct sgx_encl **encl)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int ret;
down_read(&mm->mmap_sem);
ret = sgx_find_encl(mm, addr, &vma);
if (!ret) {
*encl = vma->vm_private_data;
kref_get(&(*encl)->refcount); /* encl will be used in caller */
}
up_read(&mm->mmap_sem);
return ret;
}
/**
* sgx_measure - EEXTEND 256B chunks of EPC page based on mrmask
*
* @secs_page: SECS page address identifying enclave
* @epc_page: to-measure EPC page address
* @mrmask: 16-bit mask of chunks to measure
* @return 0 on success, negative on error
*/
static int sgx_measure(struct sgx_epc_page *secs_page,
struct sgx_epc_page *epc_page,
u16 mrmask)
{
void *secs;
void *epc;
int ret = 0;
int i, j;
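	/*
	 * Illustrative mrmask semantics (inferred from the loop below): bit n
	 * of mrmask selects the 256B chunk at offset n * 0x100, so mrmask ==
	 * 0xffff measures the whole 4KB page and mrmask == 0x0003 measures
	 * only the chunks at offsets 0x000 and 0x100.
	 */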
/* go through 16 256B chunks of data and EEXTEND if specified in mrmask */
for (i = 0, j = 1; i < 0x1000 && !ret; i += 0x100, j <<= 1) {
if (!(j & mrmask))
continue;
secs = sgx_get_page(secs_page);
epc = sgx_get_page(epc_page);
/**
* ENCLS(EEXTEND) instruction with:
* secs - device address of SECS page (obsolete in SGX2 ???)
* epc+i - device address of 256B chunk of to-measure EPC page
*/
ret = __eextend(secs, (void *)((unsigned long)epc + i));
sgx_put_page(epc);
sgx_put_page(secs);
}
return ret;
}
/**
* sgx_add_page - EADD page to enclave from user-supplied backing RAM page
*
* @secs_page: SECS page address identifying enclave
* @epc_page: to-add EPC page address
* @linaddr: address in the ELRANGE
* @secinfo: SECINFO data (RWX/page type flags)
* @backing: backing RAM page containing user-supplied data
* @return 0 on success, negative on error
*/
static int sgx_add_page(struct sgx_epc_page *secs_page,
struct sgx_epc_page *epc_page,
unsigned long linaddr,
struct sgx_secinfo *secinfo,
struct page *backing)
{
struct sgx_page_info pginfo;
void *epc_page_vaddr;
int ret;
/* map src page in, get device addresses of SECS and EPC pages */
pginfo.srcpge = (unsigned long)kmap_atomic(backing);
pginfo.secs = (unsigned long)sgx_get_page(secs_page);
epc_page_vaddr = sgx_get_page(epc_page);
pginfo.linaddr = linaddr;
pginfo.secinfo = (unsigned long)secinfo;
/**
* ENCLS(EADD) instruction with:
* pginfo.srcpge - kernel address of source (user-supplied backing) page
* pginfo.secs - device address of SECS
* pginfo.linaddr - virtual address in ELRANGE
* pginfo.secinfo - kernel address of SECINFO (RWX/page type flags)
* epc_page_vaddr - device address of to-add EPC page
*/
ret = __eadd(&pginfo, epc_page_vaddr);
sgx_put_page(epc_page_vaddr);
sgx_put_page((void *)(unsigned long)pginfo.secs);
kunmap_atomic((void *)(unsigned long)pginfo.srcpge);
return ret;
}
/**
 * sgx_process_add_page_req - process one add-page request (run by the worker thread)
*
* @req: add-page request with enclave page, SECINFO data, measurement chunks
* @return true on success, false on error
*
 * Grabs a physical EPC page, associates it with the virtual enclave page in
 * the VMA, adds the page with EADD, and measures the user-specified chunks
 * of the page with EEXTEND.
*/
static bool sgx_process_add_page_req(struct sgx_add_page_req *req)
{
struct page *backing;
struct sgx_epc_page *epc_page;
struct sgx_encl_page *encl_page = req->encl_page;
struct sgx_encl *encl = req->encl;
struct vm_area_struct *vma;
int ret;
/* grab free EPC page to write user-supplied data to */
epc_page = sgx_alloc_page(0);
if (IS_ERR(epc_page))
return false;
down_read(&encl->mm->mmap_sem);
mutex_lock(&encl->lock);
if (encl->flags & SGX_ENCL_DEAD)
goto out;
/* find VMA corresponding to enclave */
if (sgx_find_encl(encl->mm, encl_page->addr, &vma))
goto out;
backing = sgx_get_backing(encl, encl_page, false);
if (IS_ERR(backing))
goto out;
/* Do not race with do_exit() */
if (!atomic_read(&encl->mm->mm_users)) {
		sgx_put_backing(backing, false);	/* read-only use, nothing to write back */
goto out;
}
/* update enclave VMA with virtual->physical mapping of new page */
ret = vm_insert_pfn(vma, encl_page->addr, PFN_DOWN(epc_page->pa));
if (ret)
goto out;
/* actually add page with EADD */
ret = sgx_add_page(encl->secs_page.epc_page, epc_page,
encl_page->addr, &req->secinfo, backing);
	sgx_put_backing(backing, false);	/* EADD only read the backing page */
if (ret) {
sgx_warn(encl, "EADD returned %d\n", ret);
/* destroy VMA mapping made earlier with vm_insert_pfn() */
zap_vma_ptes(vma, encl_page->addr, PAGE_SIZE);
goto out;
}
/* increase number of loaded (present) EPC pages in enclave */
encl->secs_child_cnt++;
/* measure selected 256B chunks of page data with EEXTEND */
ret = sgx_measure(encl->secs_page.epc_page, epc_page, req->mrmask);
if (ret) {
sgx_warn(encl, "EEXTEND returned %d\n", ret);
/* destroy VMA mapping made earlier with vm_insert_pfn() */
zap_vma_ptes(vma, encl_page->addr, PAGE_SIZE);
goto out;
}
/* associate enclave page with new EPC page & mark as loaded in EPC */
encl_page->epc_page = epc_page;
sgx_test_and_clear_young(encl_page, encl); /* clear access bit */
list_add_tail(&encl_page->load_list, &encl->load_list);
mutex_unlock(&encl->lock);
up_read(&encl->mm->mmap_sem);
return true;
out:
/* cleanup in error case */
sgx_free_page(epc_page, encl);
mutex_unlock(&encl->lock);
up_read(&encl->mm->mmap_sem);
return false;
}
/**
* sgx_add_page_worker - separate kernel thread to EADD enclave pages
*
* @work: needed only to identify enclave
*
 * The worker is woken whenever __encl_add_page() queues at least one
 * sgx_add_page_req and keeps running until no more requests remain.
*/
static void sgx_add_page_worker(struct work_struct *work)
{
struct sgx_encl *encl;
struct sgx_add_page_req *req;
bool skip_rest = false;
bool is_empty = false;
/* identify enclave of this add-page worker */
encl = container_of(work, struct sgx_encl, add_page_work);
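	/*
	 * Refcount note: each queued request holds one enclave reference
	 * (taken with kref_get() in __encl_add_page), and the kref_put() in
	 * the loop condition below drops it per processed request; a dead
	 * enclave is thus freed once its last pending request is drained.
	 */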
/* extract add-page requests until add_page_reqs list is empty */
do {
schedule();
/* if enclave became dead, do not EADD pages but still exhaust list */
if (encl->flags & SGX_ENCL_DEAD)
skip_rest = true;
/* extract one add-page request from list */
mutex_lock(&encl->lock);
req = list_first_entry(&encl->add_page_reqs,
struct sgx_add_page_req, list);
list_del(&req->list);
is_empty = list_empty(&encl->add_page_reqs);
mutex_unlock(&encl->lock);
if (!skip_rest) {
/* try to process this request and perform EADD */
if (!sgx_process_add_page_req(req)) {
skip_rest = true;
sgx_dbg(encl, "EADD failed 0x%p\n",
(void *)req->encl_page->addr);
}
}
kfree(req);
} while (!kref_put(&encl->refcount, sgx_encl_release) &&
!is_empty);
}
static int sgx_validate_secs(const struct sgx_secs *secs)
{
u32 needed_ssaframesize = 1;
u32 tmp;
int i;
if (secs->flags & SGX_SECS_A_RESERVED_MASK)
return -EINVAL;
if (secs->flags & SGX_SECS_A_MODE64BIT) {
#ifdef CONFIG_X86_64
if (secs->size > sgx_encl_size_max_64)
return -EINVAL;
#else
return -EINVAL;
#endif
} else {
		/* On a 64-bit kernel, allow 32-bit enclaves only for
		 * tasks running in compatibility mode.
		 */
#ifdef CONFIG_X86_64
if (!test_thread_flag(TIF_ADDR32))
return -EINVAL;
#endif
if (secs->size > sgx_encl_size_max_32)
return -EINVAL;
}
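	/* XFRM must enable at least x87 and SSE (bits 0 and 1) and must not
	 * request features outside the platform's supported mask.
	 */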
if ((secs->xfrm & 0x3) != 0x3 || (secs->xfrm & ~sgx_xfrm_mask))
return -EINVAL;
	/* MPX: BNDREGS (bit 3) and BNDCSR (bit 4) of XFRM must be set or cleared together. */
if (((secs->xfrm >> 3) & 1) != ((secs->xfrm >> 4) & 1))
return -EINVAL;
/* Find biggest needed size for XSAVE area based on enclave's XFRM */
for (i = 2; i < 64; i++) {
tmp = sgx_ssaframesize_tbl[i];
if (((1 << i) & secs->xfrm) && (tmp > needed_ssaframesize))
needed_ssaframesize = tmp;
}
if (!secs->ssaframesize || !needed_ssaframesize ||
needed_ssaframesize > secs->ssaframesize)
return -EINVAL;
/* Must be power of two */
if (secs->size == 0 || (secs->size & (secs->size - 1)) != 0)
return -EINVAL;
for (i = 0; i < SGX_SECS_RESERVED1_SIZE; i++)
if (secs->reserved1[i])
return -EINVAL;
for (i = 0; i < SGX_SECS_RESERVED2_SIZE; i++)
if (secs->reserved2[i])
return -EINVAL;
for (i = 0; i < SGX_SECS_RESERVED3_SIZE; i++)
if (secs->reserved3[i])
return -EINVAL;
for (i = 0; i < SGX_SECS_RESERVED4_SIZE; i++)
if (secs->reserved[i])
return -EINVAL;
return 0;
}
/**
* sgx_init_page - init entry with enclave page metadata
*
* @encl: enclave containing entry page
* @entry: enclave page to initialize
* @addr: destination address in ELRANGE
* @return 0 on success, negative on error
*
 * For an allocated but uninitialized enclave page (entry), finds and assigns
 * a corresponding VA slot and sets the page's address from addr.
*/
static int sgx_init_page(struct sgx_encl *encl,
struct sgx_encl_page *entry,
unsigned long addr)
{
struct sgx_va_page *va_page;
struct sgx_epc_page *epc_page = NULL;
unsigned int va_offset = PAGE_SIZE;
void *vaddr;
int ret = 0;
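	/*
	 * A VA (Version Array) page holds PAGE_SIZE / 8 = 512 eight-byte
	 * version slots, so a va_offset below PAGE_SIZE means a free slot
	 * was found.
	 */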
/* iterate through all available VA pages in enclave, find first free
VA slot, update VA page metadata, and return slot offset */
list_for_each_entry(va_page, &encl->va_pages, list) {
va_offset = sgx_alloc_va_slot(va_page);
if (va_offset < PAGE_SIZE)
break;
}
/* no free VA slot found, create new VA page and use slot from it */
if (va_offset == PAGE_SIZE) {
/* allocate metadata for VA; note that slots bitmap is all-zeros */
va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
if (!va_page)
return -ENOMEM;
/* grab free EPC page to be converted into VA page */
epc_page = sgx_alloc_page(0);
if (IS_ERR(epc_page)) {
kfree(va_page);
return PTR_ERR(epc_page);
}
/* translate EPC page address into device EPC bank+offset address */
vaddr = sgx_get_page(epc_page);
if (!vaddr) {
sgx_warn(encl, "kmap of a new VA page failed %d\n",
ret);
sgx_free_page(epc_page, encl);
kfree(va_page);
return -EFAULT;
}
/**
* ENCLS(EPA) instruction to convert page into Version Array page:
* vaddr - device address of empty EPC page
*/
ret = __epa(vaddr);
sgx_put_page(vaddr);
if (ret) {
sgx_warn(encl, "EPA returned %d\n", ret);
sgx_free_page(epc_page, encl);
kfree(va_page);
return -EFAULT;
}
/* fill metadata for newly created VA page and find free slot */
va_page->epc_page = epc_page;
va_offset = sgx_alloc_va_slot(va_page);
/* add VA page to list of available VA pages of enclave */
mutex_lock(&encl->lock);
list_add(&va_page->list, &encl->va_pages);
mutex_unlock(&encl->lock);
}
entry->va_page = va_page;
entry->va_offset = va_offset;
entry->addr = addr;
return 0;
}
/* when the mmu notifier reports release of the host app's address space, declare the enclave dead */
static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct sgx_encl *encl =
container_of(mn, struct sgx_encl, mmu_notifier);
mutex_lock(&encl->lock);
encl->flags |= SGX_ENCL_DEAD;
mutex_unlock(&encl->lock);
}
static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
.release = sgx_mmu_notifier_release,
};
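/*
 * Illustrative userspace usage (a sketch, not part of this driver): the host
 * app mmap()s the ELRANGE on /dev/sgx first and then issues the ioctl with a
 * pointer to its prepared SECS:
 *
 *	struct sgx_enclave_create parms = {
 *		.src = (__u64)(uintptr_t)&secs,
 *	};
 *	ioctl(sgx_fd, SGX_IOC_ENCLAVE_CREATE, &parms);
 */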
/**
* sgx_ioc_enclave_create - handler for SGX_IOC_ENCLAVE_CREATE
*
* @filep: open file to /dev/sgx
* @cmd: the command value
* @arg: pointer to the struct sgx_enclave_create
* @return 0 on success, negative on error
*
* Creates meta-data for an enclave and executes ENCLS(ECREATE)
*/
static long sgx_ioc_enclave_create(struct file *filep, unsigned int cmd,
unsigned long arg)
{
struct sgx_enclave_create *createp = (struct sgx_enclave_create *)arg;
unsigned long src = (unsigned long)createp->src;
struct sgx_page_info pginfo;
struct sgx_secinfo secinfo;
struct sgx_encl *encl = NULL;
struct sgx_secs *secs = NULL;
struct sgx_epc_page *secs_epc;
struct vm_area_struct *vma;
void *secs_vaddr = NULL;
struct file *backing;
struct file *pcmd;
long ret;
secs = kzalloc(sizeof(*secs), GFP_KERNEL);
if (!secs)
return -ENOMEM;
	if (copy_from_user(secs, (void __user *)src, sizeof(*secs))) {
		kfree(secs);
		return -EFAULT;	/* copy_from_user() returns bytes not copied, not an errno */
	}
if (sgx_validate_secs(secs)) {
kfree(secs);
return -EINVAL;
}
	/* anonymous shmem region backing the EPC pages' content;
	   size of the enclave plus one additional page for the SECS */
backing = shmem_file_setup("dev/sgx", secs->size + PAGE_SIZE,
VM_NORESERVE);
if (IS_ERR(backing)) {
ret = PTR_ERR(backing);
goto out;
}
	/* anonymous shmem region for the EPC pages' metadata (PCMD);
	   shift by 5 (divide by 32) since each 4KB page needs one 128B PCMD entry */
pcmd = shmem_file_setup("dev/sgx",
(secs->size + PAGE_SIZE) >> 5,
VM_NORESERVE);
if (IS_ERR(pcmd)) {
fput(backing);
ret = PTR_ERR(pcmd);
goto out;
}
encl = kzalloc(sizeof(*encl), GFP_KERNEL);
if (!encl) {
fput(backing);
fput(pcmd);
ret = -ENOMEM;
goto out;
}
kref_init(&encl->refcount);
INIT_LIST_HEAD(&encl->add_page_reqs);
INIT_LIST_HEAD(&encl->va_pages);
INIT_RADIX_TREE(&encl->page_tree, GFP_KERNEL);
INIT_LIST_HEAD(&encl->load_list);
INIT_LIST_HEAD(&encl->encl_list);
mutex_init(&encl->lock);
INIT_WORK(&encl->add_page_work, sgx_add_page_worker);
encl->mm = current->mm; /* current is process that issued syscall */
encl->base = secs->base;
encl->size = secs->size;
encl->backing = backing;
encl->pcmd = pcmd;
/* grab free EPC page to serve as SECS page */
secs_epc = sgx_alloc_page(0);
if (IS_ERR(secs_epc)) {
ret = PTR_ERR(secs_epc);
secs_epc = NULL;
goto out;
}
/* associate enclave with host app TGID context (create new if needed) */
ret = sgx_add_to_tgid_ctx(encl);
if (ret)
goto out;
	/* init the enclave's secs_page: find & assign a corresponding VA slot and
	   set its address to just past the ELRANGE (to distinguish it from ordinary pages) */
ret = sgx_init_page(encl, &encl->secs_page,
encl->base + encl->size);
if (ret)
goto out;
/* translate EPC page address into device EPC bank+offset address */
secs_vaddr = sgx_get_page(secs_epc);
pginfo.srcpge = (unsigned long)secs;
pginfo.linaddr = 0;
pginfo.secinfo = (unsigned long)&secinfo;
pginfo.secs = 0;
memset(&secinfo, 0, sizeof(secinfo));
/**
* ENCLS(ECREATE) instruction with:
* pginfo.srcpge - kernel address of source SECS page
* pginfo.<other> - unused, set to zero
* secs_vaddr - device address of destination SECS page
*/
ret = __ecreate((void *)&pginfo, secs_vaddr);
sgx_put_page(secs_vaddr);
if (ret) {
sgx_dbg(encl, "ECREATE returned %ld\n", ret);
ret = -EFAULT;
goto out;
}
	/* finish initializing secs_page: associate the EPC page */
encl->secs_page.epc_page = secs_epc;
	createp->src = (unsigned long)encl->base;	/* dead store: SGX_IOC_ENCLAVE_CREATE is a write-only ioctl, so this value is never copied back to userspace */
if (secs->flags & SGX_SECS_A_DEBUG)
encl->flags |= SGX_ENCL_DEBUG;
/* register for the release notification from host */
encl->mmu_notifier.ops = &sgx_mmu_notifier_ops;
ret = mmu_notifier_register(&encl->mmu_notifier, encl->mm);
if (ret) {
encl->mmu_notifier.ops = NULL;
goto out;
}
	/* find and double-check the enclave region previously mmap()ed by the host app */
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, secs->base);
	if (!vma || vma->vm_ops != &sgx_vm_ops ||
	    vma->vm_start != secs->base ||
	    vma->vm_end != (secs->base + secs->size)) {
		ret = -EINVAL;
		up_read(&current->mm->mmap_sem);
		goto out;
	}
	vma->vm_private_data = encl;	/* associate VMA with created enclave */
	up_read(&current->mm->mmap_sem);
/* append enclave to list of enclaves of host app TGID context */
mutex_lock(&sgx_tgid_ctx_mutex);
list_add_tail(&encl->encl_list, &encl->tgid_ctx->encl_list);
mutex_unlock(&sgx_tgid_ctx_mutex);
out:
if (ret && encl)
kref_put(&encl->refcount, sgx_encl_release);
kfree(secs);
return ret;
}
/* SECINFO validation: page-to-add must be TCS/REG, with correct flags, etc. */
static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
{
u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
int i;
if ((secinfo->flags & SGX_SECINFO_RESERVED_MASK) ||
((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R)) ||
(page_type != SGX_SECINFO_TCS &&
page_type != SGX_SECINFO_REG))
return -EINVAL;
for (i = 0; i < sizeof(secinfo->reserved) / sizeof(u64); i++)
if (secinfo->reserved[i])
return -EINVAL;
return 0;
}
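/*
 * Illustrative SECINFO that passes the checks above: a regular RWX page would
 * use flags = SGX_SECINFO_REG | SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X
 * with all reserved fields zeroed; W without R is rejected, as is any page
 * type other than REG or TCS.
 */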
/* validate a user-supplied TCS page: offsets/addresses must be correctly aligned */
static int sgx_validate_tcs(struct sgx_tcs *tcs)
{
int i;
/* If FLAGS is not zero, ECALL will fail. */
if ((tcs->flags != 0) ||
(tcs->ossa & (PAGE_SIZE - 1)) ||
(tcs->ofsbase & (PAGE_SIZE - 1)) ||
(tcs->ogsbase & (PAGE_SIZE - 1)) ||
((tcs->fslimit & 0xFFF) != 0xFFF) ||
((tcs->gslimit & 0xFFF) != 0xFFF))
return -EINVAL;
for (i = 0; i < sizeof(tcs->reserved) / sizeof(u64); i++)
if (tcs->reserved[i])
return -EINVAL;
return 0;
}
/**
* __encl_add_page - part of handler for SGX_IOC_ENCLAVE_ADD_PAGE
*
* @encl: corresponding enclave
* @encl_page: allocated enclave page that will receive contents of user page
* @addp: add-page struct with src & dst pages' addresses, mrmask, secinfo
 * @secinfo: user-supplied SECINFO (RWX/page type flags)
*
 * Validates the user-supplied structures and prepares objects for the worker
 * thread sgx_add_page_worker to pick up and perform the actual EADD+EEXTEND;
 * these objects are a temporary copy of the user-supplied page in a backing
 * RAM page and an sgx_add_page_req request initialized with page info and
 * added to the worker's list.
*/
static int __encl_add_page(struct sgx_encl *encl,
struct sgx_encl_page *encl_page,
struct sgx_enclave_add_page *addp,
struct sgx_secinfo *secinfo)
{
u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
unsigned long src = (unsigned long)addp->src;
struct sgx_tcs *tcs;
struct page *backing;
struct sgx_add_page_req *req = NULL;
int ret;
int empty;
void *user_vaddr;
void *tmp_vaddr;
struct page *tmp_page;
/* allocate temporary page to receive contents of user page & be checked */
tmp_page = alloc_page(GFP_HIGHUSER); /* page not yet mapped in kernel AS */
if (!tmp_page)
return -ENOMEM;
/* temporarily map into kernel AS and fill with contents from user page */
tmp_vaddr = kmap(tmp_page);
ret = copy_from_user((void *)tmp_vaddr, (void __user *)src, PAGE_SIZE);
kunmap(tmp_page);
if (ret) {
__free_page(tmp_page);
return -EFAULT;
}
/* validate user-supplied SECINFO (page is TCS/REG, with correct flags) */
if (sgx_validate_secinfo(secinfo)) {
__free_page(tmp_page);
return -EINVAL;
}
/* if page is TCS, validate contents of this page (correct alignments) */
if (page_type == SGX_SECINFO_TCS) {
tcs = (struct sgx_tcs *)kmap(tmp_page);
ret = sgx_validate_tcs(tcs);
kunmap(tmp_page);
if (ret) {
__free_page(tmp_page);
return ret;
}
}
/* init enclave page: find & assign corresponding VA slot + set address */
ret = sgx_init_page(encl, encl_page, addp->addr);
if (ret) {
__free_page(tmp_page);
return -EINVAL;
}
mutex_lock(&encl->lock);
	/* pages cannot be added once the enclave is initialized or dead */
if (encl->flags & (SGX_ENCL_INITIALIZED | SGX_ENCL_DEAD)) {
ret = -EINVAL;
goto out;
}
	/* reject if a page with the same address is already in the enclave */
if (radix_tree_lookup(&encl->page_tree, addp->addr >> PAGE_SHIFT)) {
ret = -EEXIST;
goto out;
}
/* allocate request object to be EADDed later by sgx_add_page_worker */
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req) {
ret = -ENOMEM;
goto out;
}
/* find backing RAM page for our enclave page (used as tmp storage) */
backing = sgx_get_backing(encl, encl_page, false);
if (IS_ERR((void *)backing)) {
ret = PTR_ERR((void *)backing);
goto out;
}
/* register enclave page in enclave */
ret = radix_tree_insert(&encl->page_tree, encl_page->addr >> PAGE_SHIFT,
encl_page);
if (ret) {
sgx_put_backing(backing, false /* write */);
goto out;
}
/* copy user-supplied page to backing storage (fetched later by sgx_add_page_worker) */
user_vaddr = kmap(backing);
tmp_vaddr = kmap(tmp_page);
memcpy(user_vaddr, tmp_vaddr, PAGE_SIZE);
kunmap(backing);
kunmap(tmp_page);
if (page_type == SGX_SECINFO_TCS)
encl_page->flags |= SGX_ENCL_PAGE_TCS;
/* init request object with info needed for EADD */
memcpy(&req->secinfo, secinfo, sizeof(*secinfo));
req->encl = encl;
req->encl_page = encl_page;
req->mrmask = addp->mrmask;
empty = list_empty(&encl->add_page_reqs);
kref_get(&encl->refcount);
/* add request to enclave's list of requests & start worker if necessary */
list_add_tail(&req->list, &encl->add_page_reqs);
if (empty)
queue_work(sgx_add_page_wq, &encl->add_page_work);
sgx_put_backing(backing, true /* write */);
out:
if (ret) {
kfree(req);
sgx_free_va_slot(encl_page->va_page,
encl_page->va_offset);
}
mutex_unlock(&encl->lock);
__free_page(tmp_page);
return ret;
}
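/*
 * Illustrative userspace usage (a sketch, not part of this driver): adding
 * one fully measured page (mrmask = 0xffff covers all sixteen 256B chunks)
 * at enclave offset off:
 *
 *	struct sgx_enclave_add_page parms = {
 *		.addr    = encl_base + off,
 *		.src     = (__u64)(uintptr_t)page_buf,
 *		.secinfo = (__u64)(uintptr_t)&secinfo,
 *		.mrmask  = 0xffff,
 *	};
 *	ioctl(sgx_fd, SGX_IOC_ENCLAVE_ADD_PAGE, &parms);
 */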
/**
* sgx_ioc_enclave_add_page - handler for SGX_IOC_ENCLAVE_ADD_PAGE
*
* @filep: open file to /dev/sgx
* @cmd: the command value
* @arg: pointer to the struct sgx_enclave_add_page
*
* Creates meta-data for an enclave page and enqueues ENCLS(EADD) that will
* be processed by a worker thread later on.
*/
static long sgx_ioc_enclave_add_page(struct file *filep, unsigned int cmd,
unsigned long arg)
{
struct sgx_enclave_add_page *addp = (void *)arg;
unsigned long secinfop = (unsigned long)addp->secinfo;
struct sgx_encl *encl;
struct sgx_encl_page *page;
struct sgx_secinfo secinfo;
int ret;
/* destination address in ELRANGE must be 4KB aligned */
if (addp->addr & (PAGE_SIZE - 1))
return -EINVAL;
if (copy_from_user(&secinfo, (void __user *)secinfop, sizeof(secinfo)))
return -EFAULT;
/* find and kref_get enclave corresponding to destination address */
ret = sgx_find_and_get_encl(addp->addr, &encl);
if (ret)
return ret;
	/* sanity check: destination address must lie within the enclave's ELRANGE */
if (addp->addr < encl->base ||
addp->addr > (encl->base + encl->size - PAGE_SIZE)) {
kref_put(&encl->refcount, sgx_encl_release);
return -EINVAL;
}
/* allocate enclave page that will receive contents of src user page */
page = kzalloc(sizeof(*page), GFP_KERNEL);
if (!page) {
kref_put(&encl->refcount, sgx_encl_release);
return -ENOMEM;
}
ret = __encl_add_page(encl, page, addp, &secinfo);
kref_put(&encl->refcount, sgx_encl_release);
if (ret)
kfree(page);
return ret;
}
/**
* __sgx_encl_init - part of handler for SGX_IOC_ENCLAVE_INIT
*
* @encl: to-initialize enclave
* @sigstruct: kernel address of user-supplied SIGSTRUCT
* @einittoken: kernel address of user-supplied EINITTOKEN
*
* Calls EINIT, retries several times on failure.
*/
static int __sgx_encl_init(struct sgx_encl *encl, char *sigstruct,
struct sgx_einittoken *einittoken)
{
int ret = SGX_UNMASKED_EVENT; /* if EINIT returns this, should retry */
struct sgx_epc_page *secs_epc = encl->secs_page.epc_page;
void *secs_va = NULL;
int i;
int j;