L4Re Operating System Framework
Interface and Usage Documentation
Loading...
Searching...
No Matches
vfs_impl.h
1/*
2 * (c) 2008-2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
3 * Alexander Warg <warg@os.inf.tu-dresden.de>,
4 * Björn Döbel <doebel@os.inf.tu-dresden.de>
5 * economic rights: Technische Universität Dresden (Germany)
6 *
7 * License: see LICENSE.spdx (in this directory or the directories above)
8 */
9
10#include "fd_store.h"
11#include "vcon_stream.h"
12#include "ns_fs.h"
13
14#include <l4/bid_config.h>
15#include <l4/re/env>
16#include <l4/re/rm>
17#include <l4/re/dataspace>
18#include <l4/sys/assert.h>
19#include <l4/cxx/hlist>
20#include <l4/cxx/pair>
21#include <l4/cxx/std_alloc>
22
23#include <l4/l4re_vfs/backend>
24#include <l4/re/shared_cap>
25
26#include <unistd.h>
27#include <stdarg.h>
28#include <errno.h>
29#include <sys/uio.h>
30#include <sys/mman.h>
31
32#if 0
33#include <l4/sys/kdebug.h>
34static int debug_mmap = 1;
35#define DEBUG_LOG(level, dbg...) do { if (level) dbg } while (0)
36#else
37#define DEBUG_LOG(level, dbg...) do { } while (0)
38#endif
39
45#define USE_BIG_ANON_DS
46
47using L4Re::Rm;
48
49namespace {
50
51using cxx::Ref_ptr;
52
// File-descriptor table used by this VFS implementation. Derives from
// L4Re::Core::Fd_store only to add a constructor that pre-populates
// descriptors 0..2 with the environment's log stream (see below).
53class Fd_store : public L4Re::Core::Fd_store
54{
55public:
56  Fd_store() noexcept;
57};
58
59// for internal Vcon_streams we want to have a placement new operator, so
60// inherit and add one
// Vcon-backed stream used for stdin/stdout/stderr; it is constructed via
// placement new into a static buffer so it can be created without a heap.
61class Std_stream : public L4Re::Core::Vcon_stream
62{
63public:
64  Std_stream(L4::Cap<L4::Vcon> c) : L4Re::Core::Vcon_stream(c) {}
65};
66
// Constructor: bind fds 0, 1 and 2 to the environment's log Vcon channel,
// if one is present. The stream object lives in a function-local static
// buffer and gets an extra reference so it is never destroyed.
67Fd_store::Fd_store() noexcept
68{
69  // use this strange way to prevent deletion of the stdio object;
70  // this depends on Fd_store being a singleton !!!
71  static char m[sizeof(Std_stream)] __attribute__((aligned(sizeof(long))));
72  if (auto log = L4Re::Env::env()->log())
73    {
74      Std_stream *s = new (m) Std_stream(log);
75      set(0, cxx::ref_ptr(s)); // stdin
76      set(1, cxx::ref_ptr(s)); // stdout
77      set(2, cxx::ref_ptr(s)); // stderr
78
79      // make sure that we never delete the static io stream object
80      s->add_ref();
81    }
82}
83
// Root node of the mount tree. It is a statically allocated singleton
// (a member of Vfs below), hence the no-op operator delete.
84class Root_mount_tree : public L4Re::Vfs::Mount_tree
85{
86public:
87  Root_mount_tree() : L4Re::Vfs::Mount_tree(0) {}
88  void operator delete (void *) {}
89};
90
// The VFS backend for the POSIX layer: implements L4Re::Vfs::Ops, i.e.
// fd management, mmap-family calls, file-system/factory registries and
// mount handling. A single statically allocated instance is used, hence
// the no-op operator delete.
// NOTE(review): this listing appears to have lost a member declaration
// between _anon_offset and alloc_ds (the anon-memory pool dataspace used
// as _anon_ds in alloc_anon_mem) — verify against the original file.
91class Vfs : public L4Re::Vfs::Ops
92{
93private:
94  // if true, anon memory is fully allocated up front so OOM shows at mmap time
95  bool _early_oom;
96
97public:
98  Vfs()
99  : _early_oom(true), _root_mount(), _root(L4Re::Env::env())
100  {
101    _root_mount.add_ref();
102    _root.add_ref();
103    _root_mount.mount(cxx::ref_ptr(&_root));
104    _cwd = cxx::ref_ptr(&_root);
105
106#if 0
107    Ref_ptr<L4Re::Vfs::File> rom;
108    _root.openat("rom", 0, 0, &rom);
109
110    _root_mount.create_tree("lib/foo", rom);
111
112    _root.openat("lib", 0, 0, &_cwd);
113
114#endif
115  }
116
117  int alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) noexcept override;
118  Ref_ptr<L4Re::Vfs::File> free_fd(int fd) noexcept override;
119  Ref_ptr<L4Re::Vfs::File> get_root() noexcept override;
120  Ref_ptr<L4Re::Vfs::File> get_cwd() noexcept override;
121  void set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) noexcept override;
122  Ref_ptr<L4Re::Vfs::File> get_file(int fd) noexcept override;
123  cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>
124  set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f = Ref_ptr<>::Nil) noexcept
125  override;
126
127  int mmap2(void *start, size_t len, int prot, int flags, int fd,
128            off_t offset, void **ptr) noexcept override;
129
130  int munmap(void *start, size_t len) noexcept override;
131  int mremap(void *old, size_t old_sz, size_t new_sz, int flags,
132             void **new_addr) noexcept override;
133  int mprotect(const void *a, size_t sz, int prot) noexcept override;
134  int msync(void *addr, size_t len, int flags) noexcept override;
135  int madvise(void *addr, size_t len, int advice) noexcept override;
136
137  int register_file_system(L4Re::Vfs::File_system *f) noexcept override;
138  int unregister_file_system(L4Re::Vfs::File_system *f) noexcept override;
139  L4Re::Vfs::File_system *get_file_system(char const *fstype) noexcept override;
140  L4Re::Vfs::File_system_list file_system_list() noexcept override;
141
142  int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
143  int unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
144  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(int proto) noexcept override;
145  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(char const *proto_name) noexcept override;
146  int mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) noexcept override;
147
148  // singleton — never heap-deleted
149  void operator delete (void *) {}
150
151  void *malloc(size_t size) noexcept override { return Vfs_config::malloc(size); }
152  void free(void *m) noexcept override { Vfs_config::free(m); }
153
154private:
155  Root_mount_tree _root_mount;      // root node of the mount tree
156  L4Re::Core::Env_dir _root;        // root directory backed by the L4Re environment
157  Ref_ptr<L4Re::Vfs::File> _cwd;    // current working directory
158  Fd_store fds;                     // per-task file-descriptor table
159
160  // singly linked list of registered file-system drivers
161  L4Re::Vfs::File_system *_fs_registry;
162
163  // ownership wrapper for a registered file factory, linked into _file_factories
164  struct File_factory_item : cxx::H_list_item_t<File_factory_item>
165  {
166    cxx::Ref_ptr<L4Re::Vfs::File_factory> f;
167    explicit File_factory_item(cxx::Ref_ptr<L4Re::Vfs::File_factory> const &f)
168    : f(f) {};
169
170    File_factory_item() = default;
171    File_factory_item(File_factory_item const &) = delete;
172    File_factory_item &operator = (File_factory_item const &) = delete;
173  };
174
175  cxx::H_list_t<File_factory_item> _file_factories;
176
177  // current allocation offset into the anon-memory pool dataspace
178  l4_addr_t _anon_offset;
179
180  int alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds);
181  int alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
182                     l4_addr_t *offset);
183
184  void align_mmap_start_and_length(void **start, size_t *length);
185  int munmap_regions(void *start, size_t len);
186
187  L4Re::Vfs::File_system *find_fs_from_type(char const *fstype) noexcept;
188};
185
// Return true iff the two NUL-terminated strings are identical.
static inline bool strequal(char const *a, char const *b)
{
  while (*a == *b)
    {
      if (*a == '\0')
        return true;
      ++a;
      ++b;
    }
  return false;
}
192
193int
194Vfs::register_file_system(L4Re::Vfs::File_system *f) noexcept
195{
196 using L4Re::Vfs::File_system;
197
198 if (!f)
199 return -EINVAL;
200
201 for (File_system *c = _fs_registry; c; c = c->next())
202 if (strequal(c->type(), f->type()))
203 return -EEXIST;
204
205 f->next(_fs_registry);
206 _fs_registry = f;
207
208 return 0;
209}
210
// Remove a previously registered file-system driver from the registry.
// Returns 0 on success, -EINVAL for a null pointer, -ENOENT if the
// driver is not in the list.
211int
212Vfs::unregister_file_system(L4Re::Vfs::File_system *f) noexcept
213{
214  using L4Re::Vfs::File_system;
215
216  if (!f)
217    return -EINVAL;
218
219  File_system **p = &_fs_registry;
220
221  // walk via pointer-to-pointer so unlinking needs no special head case
222  for (; *p; p = &(*p)->next())
223    if (*p == f)
224      {
225        *p = f->next();
226        f->next() = 0;
227        return 0;
228      }
229
230  return -ENOENT;
231}
231
232L4Re::Vfs::File_system *
233Vfs::find_fs_from_type(char const *fstype) noexcept
234{
235 L4Re::Vfs::File_system_list fsl(_fs_registry);
236 for (L4Re::Vfs::File_system_list::Iterator c = fsl.begin();
237 c != fsl.end(); ++c)
238 if (strequal(c->type(), fstype))
239 return *c;
240 return 0;
241}
242
// Return an iterable view over all registered file-system drivers.
243L4Re::Vfs::File_system_list
244Vfs::file_system_list() noexcept
245{
246  return L4Re::Vfs::File_system_list(_fs_registry);
247}
248
249L4Re::Vfs::File_system *
250Vfs::get_file_system(char const *fstype) noexcept
251{
252 L4Re::Vfs::File_system *fs;
253 if ((fs = find_fs_from_type(fstype)))
254 return fs;
255
256 // Try to load a file system module dynamically
257 int res = Vfs_config::load_module(fstype);
258 if (res < 0)
259 return 0;
260
261 // Try again
262 return find_fs_from_type(fstype);
263}
264
265int
266Vfs::register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
267{
268 if (!f)
269 return -EINVAL;
270
271 void *x = this->malloc(sizeof(File_factory_item));
272 if (!x)
273 return -ENOMEM;
274
275 auto ff = new (x, cxx::Nothrow()) File_factory_item(f);
276 _file_factories.push_front(ff);
277 return 0;
278}
279
// Remove a previously registered file factory.
// Destroys and frees the matching list item. Returns 0 on success,
// -ENOENT if the factory is not registered.
280int
281Vfs::unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
282{
283  for (auto p: _file_factories)
284    {
285      if (p->f == f)
286        {
287          _file_factories.remove(p);
288          // manual destroy + free matches the placement-new allocation above
289          p->~File_factory_item();
290          this->free(p);
291          return 0;
292        }
293    }
294  return -ENOENT;
295}
295
296Ref_ptr<L4Re::Vfs::File_factory>
297Vfs::get_file_factory(int proto) noexcept
298{
299 for (auto p: _file_factories)
300 if (p->f->proto() == proto)
301 return p->f;
302
303 return Ref_ptr<L4Re::Vfs::File_factory>();
304}
305
306Ref_ptr<L4Re::Vfs::File_factory>
307Vfs::get_file_factory(char const *proto_name) noexcept
308{
309 for (auto p: _file_factories)
310 {
311 auto n = p->f->proto_name();
312 if (n)
313 {
314 char const *a = n;
315 char const *b = proto_name;
316 for (; *a && *b && *a == *b; ++a, ++b)
317 ;
318
319 if ((*a == 0) && (*b == 0))
320 return p->f;
321 }
322 }
323
324 return Ref_ptr<L4Re::Vfs::File_factory>();
325}
326
327int
328Vfs::alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) noexcept
329{
330 int fd = fds.alloc();
331 if (fd < 0)
332 return -EMFILE;
333
334 if (f)
335 fds.set(fd, f);
336
337 return fd;
338}
339
340Ref_ptr<L4Re::Vfs::File>
341Vfs::free_fd(int fd) noexcept
342{
343 Ref_ptr<L4Re::Vfs::File> f = fds.get(fd);
344
345 if (!f)
346 return Ref_ptr<>::Nil;
347
348 fds.free(fd);
349 return f;
350}
351
352
// Return the root directory of this VFS (the statically allocated _root).
353Ref_ptr<L4Re::Vfs::File>
354Vfs::get_root() noexcept
355{
356  return cxx::ref_ptr(&_root);
357}
358
// Return the current working directory.
359Ref_ptr<L4Re::Vfs::File>
360Vfs::get_cwd() noexcept
361{
362  return _cwd;
363}
364
// Set the current working directory; a Nil reference is ignored.
365void
366Vfs::set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) noexcept
367{
368  // FIXME: check for is dir
369  if (dir)
370    _cwd = dir;
371}
372
// Return the file bound to descriptor fd, or Nil if fd is unused/invalid.
373Ref_ptr<L4Re::Vfs::File>
374Vfs::get_file(int fd) noexcept
375{
376  return fds.get(fd);
377}
378
// Bind file f to descriptor fd (Nil clears the slot) and return the
// previously bound file. The pair's second member is 0 on success or
// the positive error code EBADF if fd is out of range.
379cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>
380Vfs::set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f) noexcept
381{
382  if (!fds.check_fd(fd))
383    return cxx::pair(Ref_ptr<L4Re::Vfs::File>(Ref_ptr<>::Nil), EBADF);
384
385  Ref_ptr<L4Re::Vfs::File> old = fds.get(fd);
386  fds.set(fd, f);
387  return cxx::pair(old, 0);
388}
389
390
// Helper macros: look up descriptor `fd` into local `fi` and bail out
// with -err if the descriptor is not bound to a file.
391#define GET_FILE_DBG(fd, err) \
392  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
393  if (!fi)                           \
394    {                                \
395      return -err;                   \
396    }
397
// Same as GET_FILE_DBG, without the braces around the early return.
398#define GET_FILE(fd, err) \
399  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
400  if (!fi)                           \
401    return -err;
402
403void
404Vfs::align_mmap_start_and_length(void **start, size_t *length)
405{
406 l4_addr_t const s = reinterpret_cast<l4_addr_t>(*start);
407 size_t const o = s & (L4_PAGESIZE - 1);
408
409 *start = reinterpret_cast<void *>(l4_trunc_page(s));
410 *length = l4_round_page(*length + o);
411}
412
// Detach all regions overlapping [start, start+len) from the region map,
// releasing dataspace references as mappings are detached. Returns 0 on
// success or a negative error from detach (-EINVAL for unaligned start).
// NOTE(review): this listing appears to have lost one line inside the
// first DEBUG_LOG block (hex output of `start`) — verify against the
// original file.
413int
414Vfs::munmap_regions(void *start, size_t len)
415{
416  using namespace L4;
417  using namespace L4Re;
418
419  int err;
420  Cap<Dataspace> ds;
421  Cap<Rm> r = Env::env()->rm();
422
423  if (l4_addr_t(start) & (L4_PAGESIZE - 1))
424    return -EINVAL;
425
426  align_mmap_start_and_length(&start, &len);
427
428  // detach may need several rounds; Detach_again signals more work
429  while (1)
430    {
431      DEBUG_LOG(debug_mmap, {
432                l4_kd_outstring("DETACH: start = 0x");
433                l4_kd_outstring(" len = 0x");
434                l4_kd_outhex32(len);
435                l4_kd_outstring("\n");
436      });
437      err = r->detach(l4_addr_t(start), len, &ds, This_task);
438      if (err < 0)
439        return err;
440
441      switch (err & Rm::Detach_result_mask)
442        {
443        case Rm::Split_ds:
444          // the mapping was split; the region map keeps a reference
445          if (ds.is_valid())
446            L4Re::virt_cap_alloc->take(ds);
447          return 0;
448        case Rm::Detached_ds:
449          if (ds.is_valid())
450            L4Re::virt_cap_alloc->release(ds);
451          break;
452        default:
453          break;
454        }
455
456      if (!(err & Rm::Detach_again))
457        return 0;
458    }
459}
459
460int
461Vfs::munmap(void *start, size_t len) L4_NOTHROW
462{
463 using namespace L4;
464 using namespace L4Re;
465
466 int err = 0;
467 Cap<Rm> r = Env::env()->rm();
468
469 // Fields for obtaining a list of areas for the calling process
470 long area_cnt = -1; // No. of areas in this process
471 Rm::Area const *area_array;
472 bool matches_area = false; // true if unmap parameters match an area
473
474 // First check if there are any areas matching the munmap request. Those
475 // might have been created by an mmap call using PROT_NONE as protection
476 // modifier.
477
478 area_cnt = r->get_areas((l4_addr_t) start, &area_array);
479
480 // It is enough to check for the very first entry, since get_areas will
481 // only return areas with a starting address equal or greater to <start>.
482 // However, we intend to unmap at most the area starting exactly at
483 // <start>.
484 if (area_cnt > 0)
485 {
486 size_t area_size = area_array[0].end - area_array[0].start + 1;
487
488 // Only free the area if the munmap parameters describe it exactly.
489 if (area_array[0].start == (l4_addr_t) start && area_size == len)
490 {
491 r->free_area((l4_addr_t) start);
492 matches_area = true;
493 }
494 }
495
496 // After clearing possible area reservations from PROT_NONE mappings, clear
497 // any regions in the address range specified. Note that errors shall be
498 // suppressed if an area was freed but no regions were found.
499 err = munmap_regions(start, len);
500 if (err == -ENOENT && matches_area)
501 return 0;
502
503 return err;
504}
505
// Allocate a fresh dataspace of `size` bytes into *ds.
// Returns 0 on success, -ENOMEM if no capability slot is available, or
// the allocator's negative error code.
506int
507Vfs::alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds)
508{
509  *ds = L4Re::make_shared_cap<L4Re::Dataspace>(L4Re::virt_cap_alloc);
510
511  if (!ds->is_valid())
512    return -ENOMEM;
513
514  int err;
515  if ((err = Vfs_config::allocator()->alloc(size, ds->get())) < 0)
516    return err;
517
518  DEBUG_LOG(debug_mmap, {
519            l4_kd_outstring("ANON DS ALLOCATED: size=");
520            l4_kd_outhex32(size);
521            l4_kd_outstring("  cap = 0x");
522            l4_kd_outhex32(ds->cap());
523            l4_kd_outstring("\n");
524  });
525
526  return 0;
527}
528
// Provide anonymous backing memory of `size` bytes: either a dedicated
// dataspace (for large requests) or a slice carved out of a shared pool
// dataspace. On success *ds and *offset describe where the memory lives.
// With _early_oom set, backing memory is allocated eagerly so that OOM
// is reported here rather than on first touch.
529int
530Vfs::alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
531                    l4_addr_t *offset)
532{
533#if !defined(CONFIG_MMU)
534  // Small values for !MMU systems. These platforms do not have much memory
535  // typically and the memory must be instantly allocated.
536  enum
537  {
538    ANON_MEM_DS_POOL_SIZE = 256UL << 10, // size of a pool dataspace used for anon memory
539    ANON_MEM_MAX_SIZE     = 32UL << 10,  // request size from which a dedicated dataspace is allocated
540  };
541#elif defined(USE_BIG_ANON_DS)
542  enum
543  {
544    ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
545    ANON_MEM_MAX_SIZE     = 32UL << 20,  // request size from which a dedicated dataspace is allocated
546  };
547#else
548  enum
549  {
550    ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
551    ANON_MEM_MAX_SIZE     = 0UL << 20,   // pool disabled: every request gets its own dataspace
552  };
553#endif
554
555  // large request: give it a dedicated dataspace
556  if (size >= ANON_MEM_MAX_SIZE)
557    {
558      int err;
559      if ((err = alloc_ds(size, ds)) < 0)
560        return err;
561
562      *offset = 0;
563
564      if (!_early_oom)
565        return err;
566
567      return (*ds)->allocate(0, size);
568    }
569
570  // small request: serve it from the pool; start a new pool dataspace
571  // when there is none yet or the current one is exhausted
571  if (!_anon_ds.is_valid() || _anon_offset + size >= ANON_MEM_DS_POOL_SIZE)
572    {
573      int err;
574      if ((err = alloc_ds(ANON_MEM_DS_POOL_SIZE, ds)) < 0)
575        return err;
576
577      _anon_offset = 0;
578      _anon_ds = *ds;
579    }
580  else
581    *ds = _anon_ds;
582
583  if (_early_oom)
584    {
585      if (int err = (*ds)->allocate(_anon_offset, size))
586        return err;
587    }
588
589  *offset = _anon_offset;
590  _anon_offset += size;
591  return 0;
592}
591
// Backend for POSIX mmap()/mmap2(): page4k_offset is the file offset in
// 4KiB units. Handles area reservation (PROT_NONE / internal flag
// 0x1000000), anonymous memory, shared file mappings and MAP_PRIVATE
// copy-on-write copies; the resulting address is stored in *resptr.
// NOTE(review): this listing appears to have lost several source lines
// (e.g. the declaration of the local `ds` shared cap, the Unique_region
// declarations for the private-copy path and parts of the final attach
// argument list) — verify against the original file before building.
592int
593Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t page4k_offset,
594           void **resptr) L4_NOTHROW
595{
596  DEBUG_LOG(debug_mmap, {
597            l4_kd_outstring("MMAP params: ");
598            l4_kd_outstring("start = 0x");
600            l4_kd_outstring(", len = 0x");
601            l4_kd_outhex32(len);
602            l4_kd_outstring(", prot = 0x");
603            l4_kd_outhex32(prot);
604            l4_kd_outstring(", flags = 0x");
605            l4_kd_outhex32(flags);
606            l4_kd_outstring(", offset = 0x");
607            l4_kd_outhex32(page4k_offset);
608            l4_kd_outstring("\n");
609  });
610
611  using namespace L4Re;
612  off64_t offset = l4_trunc_page(page4k_offset << 12);
613
614  if (flags & MAP_FIXED)
615    if (l4_addr_t(start) & (L4_PAGESIZE - 1))
616      return -EINVAL;
617
618  align_mmap_start_and_length(&start, &len);
619
620  // special code to just reserve an area of the virtual address space
621  // Same behavior should be exposed when mapping with PROT_NONE. Mind that
622  // PROT_NONE can only be specified exclusively, since it is defined to 0x0.
623  if ((flags & 0x1000000) || (prot == PROT_NONE))
624    {
625      int err;
626      L4::Cap<Rm> r = Env::env()->rm();
627      l4_addr_t area = reinterpret_cast<l4_addr_t>(start);
628      err = r->reserve_area(&area, len, L4Re::Rm::F::Search_addr);
629      if (err < 0)
630        return err;
631
632      *resptr = reinterpret_cast<void*>(area);
633
634      DEBUG_LOG(debug_mmap, {
635                l4_kd_outstring("  MMAP reserved area: 0x");
636                l4_kd_outhex32(area);
637                l4_kd_outstring("  length= 0x");
638                l4_kd_outhex32(len);
639                l4_kd_outstring("\n");
640      });
641
642      return 0;
643    }
644
646  l4_addr_t anon_offset = 0;
647  L4Re::Rm::Flags rm_flags(0);
648
      // anonymous and private mappings are backed by anon memory that is
      // freed again when the region is detached
649  if (flags & (MAP_ANONYMOUS | MAP_PRIVATE))
650    {
651      rm_flags |= L4Re::Rm::F::Detach_free;
652
653      int err = alloc_anon_mem(len, &ds, &anon_offset);
654      if (err)
655        return err;
656
657      DEBUG_LOG(debug_mmap, {
658                l4_kd_outstring("  USE ANON MEM: 0x");
659                l4_kd_outhex32(ds.cap());
660                l4_kd_outstring(" offs = 0x");
661                l4_kd_outhex32(anon_offset);
662                l4_kd_outstring("\n");
663      });
664    }
665
666  char const *region_name = "[unknown]";
667  l4_addr_t file_offset = 0;
668  if (!(flags & MAP_ANONYMOUS))
669    {
670      Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);
671      if (!fi)
672        return -EBADF;
673
674      region_name = fi->path();
675
676      L4::Cap<L4Re::Dataspace> fds = fi->data_space();
677
678      if (!fds.is_valid())
679        return -EINVAL;
680
681      if (len + offset > l4_round_page(fds->size()))
682        return -EINVAL;
683
684      if (flags & MAP_PRIVATE)
685        {
686          DEBUG_LOG(debug_mmap, l4_kd_outstring("COW\n"););
              // first try a dataspace-level copy; fall back to attaching
              // both dataspaces and copying via memcpy if unsupported
687          int err = ds->copy_in(anon_offset, fds, offset, len);
688          file_offset = offset;
689          if (err == -L4_EINVAL)
690            {
691              L4::Cap<Rm> r = Env::env()->rm();
694              err = r->attach(&src, len,
696                              fds, offset);
697              if (err < 0)
698                return err;
699
700              err = r->attach(&dst, len,
702                              ds.get(), anon_offset);
703              if (err < 0)
704                return err;
705
706              memcpy(dst.get(), src.get(), len);
707
708              region_name = "[mmap-private]";
709              file_offset = (unsigned long)dst.get();
710            }
711          else if (err)
712            return err;
713
714          offset = anon_offset;
715        }
716      else
717        {
              // shared file mapping: map the file's dataspace directly
718          L4Re::virt_cap_alloc->take(fds);
719          ds = L4Re::Shared_cap<L4Re::Dataspace>(fds, L4Re::virt_cap_alloc);
720        }
721    }
722  else
723    {
724      offset = anon_offset;
725      region_name = "[anon]";
726      file_offset = offset;
727    }
728
729
730  if (!(flags & MAP_FIXED) && start == 0)
731    start = reinterpret_cast<void*>(L4_PAGESIZE);
732
733  char *data = static_cast<char *>(start);
734  L4::Cap<Rm> r = Env::env()->rm();
735  l4_addr_t overmap_area = L4_INVALID_ADDR;
736
737  int err;
738  if (flags & MAP_FIXED)
739    {
          // reserve the target range so nothing else maps there while we
          // clear out any old mappings
740      overmap_area = l4_addr_t(start);
741
742      err = r->reserve_area(&overmap_area, len);
743      if (err < 0)
744        overmap_area = L4_INVALID_ADDR;
745
746      rm_flags |= Rm::F::In_area;
747
748      // Make sure to remove old mappings residing at the respective address
749      // range. If none exists, we are fine as well, allowing us to ignore
750      // ENOENT here.
751      err = munmap_regions(start, len);
752      if (err && err != -ENOENT)
753        return err;
754    }
755
756  if (!(flags & MAP_FIXED))
757    rm_flags |= Rm::F::Search_addr;
758  if (prot & PROT_READ)
759    rm_flags |= Rm::F::R;
760  if (prot & PROT_WRITE)
761    rm_flags |= Rm::F::W;
762  if (prot & PROT_EXEC)
763    rm_flags |= Rm::F::X;
764
765  err = r->attach(&data, len, rm_flags,
766                  L4::Ipc::make_cap(ds.get(), (prot & PROT_WRITE)
770                  region_name, file_offset);
771
772  DEBUG_LOG(debug_mmap, {
773            l4_kd_outstring("  MAPPED: 0x");
774            l4_kd_outhex32(ds.cap());
775            l4_kd_outstring("  addr: 0x");
777            l4_kd_outstring("  bytes: 0x");
778            l4_kd_outhex32(len);
779            l4_kd_outstring("  offset: 0x");
780            l4_kd_outhex32(offset);
781            l4_kd_outstring("  err = ");
782            l4_kd_outdec(err);
783            l4_kd_outstring("\n");
784  });
785
786
787  if (overmap_area != L4_INVALID_ADDR)
788    r->free_area(overmap_area);
789
790  if (err < 0)
791    return err;
792
793  l4_assert (!(start && !data));
794
795  // release ownership of the attached DS
796  ds.release();
797  *resptr = data;
798
799  return 0;
800}
801
802namespace {
803 class Auto_area
804 {
805 public:
806 L4::Cap<L4Re::Rm> r;
807 l4_addr_t a;
808
809 explicit Auto_area(L4::Cap<L4Re::Rm> r, l4_addr_t a = L4_INVALID_ADDR)
810 : r(r), a(a) {}
811
812 int reserve(l4_addr_t _a, l4_size_t sz, L4Re::Rm::Flags flags)
813 {
814 free();
815 a = _a;
816 int e = r->reserve_area(&a, sz, flags);
817 if (e)
818 a = L4_INVALID_ADDR;
819 return e;
820 }
821
822 void free()
823 {
824 if (is_valid())
825 {
826 r->free_area(a);
827 a = L4_INVALID_ADDR;
828 }
829 }
830
831 bool is_valid() const { return a != L4_INVALID_ADDR; }
832
833 ~Auto_area() { free(); }
834 };
835}
836
// Backend for POSIX mremap(): shrink in place, grow in place when the
// adjacent address range is free, or move the mapping to a new range
// (MREMAP_MAYMOVE / MREMAP_FIXED). Newly gained space is padded with
// anonymous memory.
// NOTE(review): this listing appears to have lost several source lines
// (the detach flags argument, the declaration of `tds` and the attach
// flags for the padding) — verify against the original file.
837int
838Vfs::mremap(void *old_addr, size_t old_size, size_t new_size, int flags,
839            void **new_addr) L4_NOTHROW
840{
841  using namespace L4Re;
842
843  DEBUG_LOG(debug_mmap, {
844            l4_kd_outstring("Mremap: addr = 0x");
845            l4_kd_outhex32((l4_umword_t)old_addr);
846            l4_kd_outstring(" old_size = 0x");
847            l4_kd_outhex32(old_size);
848            l4_kd_outstring("  new_size = 0x");
849            l4_kd_outhex32(new_size);
850            l4_kd_outstring("\n");
851  });
852
853  if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
854    return -EINVAL;
855
856  l4_addr_t oa = l4_trunc_page(reinterpret_cast<l4_addr_t>(old_addr));
857  if (oa != reinterpret_cast<l4_addr_t>(old_addr))
858    return -EINVAL;
859
860  bool const fixed = flags & MREMAP_FIXED;
861  bool const maymove = flags & MREMAP_MAYMOVE;
862
863  L4::Cap<Rm> r = Env::env()->rm();
864
865  // sanitize input parameters to multiples of pages
866  old_size = l4_round_page(old_size);
867  new_size = l4_round_page(new_size);
868
869  if (!fixed)
870    {
871      if (new_size < old_size)
872        {
              // shrinking: just unmap the tail
873          *new_addr = old_addr;
874          return munmap(reinterpret_cast<void*>(oa + new_size),
875                        old_size - new_size);
876        }
877
878      if (new_size == old_size)
879        {
880          *new_addr = old_addr;
881          return 0;
882        }
883    }
884
      // protect the old range against concurrent changes while we work
885  Auto_area old_area(r);
886  int err = old_area.reserve(oa, old_size, L4Re::Rm::Flags(0));
887  if (err < 0)
888    return -EINVAL;
889
890  l4_addr_t pad_addr;
891  Auto_area new_area(r);
892  if (fixed)
893    {
894      l4_addr_t na = l4_trunc_page(reinterpret_cast<l4_addr_t>(*new_addr));
895      if (na != reinterpret_cast<l4_addr_t>(*new_addr))
896        return -EINVAL;
897
898      // check if the current virtual memory area can be expanded
899      int err = new_area.reserve(na, new_size, L4Re::Rm::Flags(0));
900      if (err < 0)
901        return err;
902
903      pad_addr = na;
904      // unmap all stuff and remap ours ....
905    }
906  else
907    {
908      l4_addr_t ta = oa + old_size;
909      unsigned long ts = new_size - old_size;
910      // check if the current virtual memory area can be expanded
911      long err = new_area.reserve(ta, ts, L4Re::Rm::Flags(0));
912      if (!maymove && err)
913        return -ENOMEM;
914
915      L4Re::Rm::Offset toffs;
916      L4Re::Rm::Flags tflags;
917      L4::Cap<L4Re::Dataspace> tds;
918
919      err = r->find(&ta, &ts, &toffs, &tflags, &tds);
920
921      // there is enough space to expand the mapping in place
922      if (err == -ENOENT || (err == 0 && (tflags & Rm::F::In_area)))
923        {
924          old_area.free(); // pad at the original address
925          pad_addr = oa + old_size;
926          *new_addr = old_addr;
927        }
928      else if (!maymove)
929        return -ENOMEM;
930      else
931        {
932          // search for a new area to remap
933          err = new_area.reserve(0, new_size, Rm::F::Search_addr);
934          if (err < 0)
935            return -ENOMEM;
936
937          pad_addr = new_area.a + old_size;
938          *new_addr = reinterpret_cast<void *>(new_area.a);
939        }
940    }
941
      // move: walk all regions of the old range, re-attach each at the
      // corresponding offset in the new area, then detach the original
942  if (old_area.is_valid())
943    {
944      unsigned long size = old_size;
945
946      l4_addr_t a = old_area.a;
947      unsigned long s = 1;
948      L4Re::Rm::Offset o;
949      L4Re::Rm::Flags f;
950      L4::Cap<L4Re::Dataspace> ds;
951
952      while (r->find(&a, &s, &o, &f, &ds) >= 0 && !(f & Rm::F::In_area))
953        {
              // clamp the found region to the old range
954          if (a < old_area.a)
955            {
956              auto d = old_area.a - a;
957              a = old_area.a;
958              s -= d;
959              o += d;
960            }
961
962          if (a + s > old_area.a + old_size)
963            s = old_area.a + old_size - a;
964
965          l4_addr_t x = a - old_area.a + new_area.a;
966
967          int err = r->attach(&x, s, Rm::F::In_area | f,
968                              L4::Ipc::make_cap(ds, f.cap_rights()), o);
969          if (err < 0)
970            return err;
971
972          // count the new attached ds reference
973          L4Re::virt_cap_alloc->take(ds);
974
975          err = r->detach(a, s, &ds, This_task,
977          if (err < 0)
978            return err;
979
980          switch (err & Rm::Detach_result_mask)
981            {
982            case Rm::Split_ds:
983              // add a reference as we split up a mapping
984              if (ds.is_valid())
985                L4Re::virt_cap_alloc->take(ds);
986              break;
987            case Rm::Detached_ds:
988              if (ds.is_valid())
989                L4Re::virt_cap_alloc->release(ds);
990              break;
991            default:
992              break;
993            }
994
995          if (size <= s)
996            break;
997          a += s;
998          size -= s;
999          s = 1;
1000        }
1001
1002      old_area.free();
1003    }
1004
      // pad the grown part with fresh anonymous memory
1005  if (old_size < new_size)
1006    {
1007      l4_addr_t const pad_sz = new_size - old_size;
1008      l4_addr_t toffs;
1010      int err = alloc_anon_mem(pad_sz, &tds, &toffs);
1011      if (err)
1012        return err;
1013
1014      // FIXME: must get the protection rights from the old
1015      //        mapping and use the same here, for now just use RWX
1016      err = r->attach(&pad_addr, pad_sz,
1018                      L4::Ipc::make_cap_rw(tds.get()), toffs);
1019      if (err < 0)
1020        return err;
1021
1022      // release ownership of tds, the region map is now the new owner
1023      tds.release();
1024    }
1025
1026  return 0;
1027}
1028
1029int
1030Vfs::mprotect(const void * /* a */, size_t /* sz */, int prot) L4_NOTHROW
1031{
1032 return (prot & PROT_WRITE) ? -ENOSYS : 0;
1033}
1034
// Backend for POSIX msync(): accepted as a no-op, always succeeds.
1035int
1036Vfs::msync(void *, size_t, int) L4_NOTHROW
1037{ return 0; }
1038
// Backend for POSIX madvise(): advice is ignored, always succeeds.
1039int
1040Vfs::madvise(void *, size_t, int) L4_NOTHROW
1041{ return 0; }
1042
1043}
1044
// Pointer to the VFS operations table used by the POSIX layer; the
// public symbol l4re_env_posix_vfs_ops is an alias for it.
// NOTE(review): initialization is not visible in this listing — verify
// where the pointer is set in the original file.
1045L4Re::Vfs::Ops *__rtld_l4re_env_posix_vfs_ops;
1046extern void *l4re_env_posix_vfs_ops __attribute__((alias("__rtld_l4re_env_posix_vfs_ops"), visibility("default")));
1047
namespace {
  // Heap-allocated mount-tree node (unlike the static Root_mount_tree);
  // memory comes from the VFS ops table's malloc/free.
1049  class Real_mount_tree : public L4Re::Vfs::Mount_tree
1050  {
1051  public:
1052    explicit Real_mount_tree(char *n) : Mount_tree(n) {}
1053
1054    void *operator new (size_t size)
1055    { return __rtld_l4re_env_posix_vfs_ops->malloc(size); }
1056
1057    void operator delete (void *mem)
1058    { __rtld_l4re_env_posix_vfs_ops->free(mem); }
1059  };
1060}
1061
// Mount `dir` at `path`: look up the deepest existing node in the mount
// tree, create intermediate nodes for the remaining path components and
// attach `dir` at the final one. Returns 0 on success, -EEXIST if the
// path already exists, -ENOMEM on allocation failure, -EINVAL otherwise.
// NOTE(review): this listing appears to have lost the declaration of
// `base` (the lookup cursor) before the lookup call — verify against
// the original file.
1063int
1064Vfs::mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) noexcept
1065{
1066  using L4Re::Vfs::File;
1067  using L4Re::Vfs::Mount_tree;
1068  using L4Re::Vfs::Path;
1069
1070  cxx::Ref_ptr<Mount_tree> root = get_root()->mount_tree();
1071  if (!root)
1072    return -EINVAL;
1073
1075  Path p = root->lookup(Path(path), &base);
1076
1077  while (!p.empty())
1078    {
1079      Path f = p.strip_first();
1080
1081      if (f.empty())
1082        return -EEXIST;
1083
1084      char *name = __rtld_l4re_env_posix_vfs_ops->strndup(f.path(), f.length());
1085      if (!name)
1086        return -ENOMEM;
1087
1088      auto nt = cxx::make_ref_obj<Real_mount_tree>(name);
1089      if (!nt)
1090        {
1091          __rtld_l4re_env_posix_vfs_ops->free(name);
1092          return -ENOMEM;
1093        }
1094
1095      base->add_child_node(nt);
1096      base = nt;
1097
1098      if (p.empty())
1099        {
1100          nt->mount(dir);
1101          return 0;
1102        }
1103    }
1104
1105  return -EINVAL;
1106}
1107
1108#undef DEBUG_LOG
1109#undef GET_FILE_DBG
1110#undef GET_FILE
static Env const * env() noexcept
Returns the initial environment for the current task.
Definition env:96
Unique region.
Definition rm:435
Region map.
Definition rm:84
@ Detached_ds
Detached data space.
Definition rm:91
@ Detach_again
Detached data space, more to do.
Definition rm:96
@ Split_ds
Split data space, and done.
Definition rm:93
@ Detach_exact
Do an unmap of the exact region given.
Definition rm:226
@ Detach_keep
Do not free the detached data space, ignore the F::Detach_free.
Definition rm:246
The basic interface for an open POSIX file.
Definition vfs.h:461
Interface for the POSIX backends of an application.
Definition vfs.h:1110
l4_cap_idx_t cap() const noexcept
Return capability selector.
Definition capability.h:49
bool is_valid() const noexcept
Test whether the capability is a valid capability index (i.e., not L4_INVALID_CAP).
Definition capability.h:57
@ Invalid
Invalid capability selector.
Definition capability.h:42
T get() const noexcept
Return the address.
Definition rm:508
A reference-counting pointer with automatic cleanup.
Definition ref_ptr:71
Dataspace interface.
Environment interface.
unsigned int l4_size_t
Unsigned size type.
Definition l4int.h:22
unsigned long l4_umword_t
Unsigned machine word.
Definition l4int.h:40
unsigned long l4_addr_t
Address type.
Definition l4int.h:34
@ L4_EINVAL
Invalid argument.
Definition err.h:47
@ L4_CAP_FPAGE_RO
Read right for capability flexpages.
Definition __l4_fpage.h:176
@ L4_CAP_FPAGE_RW
Read and interface specific 'W' right for capability flexpages.
Definition __l4_fpage.h:192
l4_addr_t l4_trunc_page(l4_addr_t address) L4_NOTHROW
Round an address down to the next lower page boundary.
Definition consts.h:446
l4_addr_t l4_round_page(l4_addr_t address) L4_NOTHROW
Round address up to the next page.
Definition consts.h:471
#define L4_PAGESIZE
Minimal page size (in bytes).
Definition consts.h:389
#define L4_PAGESHIFT
Size of a page, log2-based.
Definition consts.h:26
@ L4_INVALID_ADDR
Invalid address.
Definition consts.h:503
#define L4_NOTHROW
Mark a function declaration and definition as never throwing an exception.
Definition compiler.h:161
Functionality for invoking the kernel debugger.
void l4_kd_outdec(l4_mword_t number)
Output a decimal unsigned machine word via the kernel debugger.
Definition kdebug.h:334
void l4_kd_outhex32(l4_uint32_t number)
Output a 32-bit unsigned hexadecimal number via the kernel debugger.
Definition kdebug.h:284
void l4_kd_outstring(char const *text)
Output a string via the kernel debugger.
Definition kdebug.h:237
L4::Detail::Shared_cap_impl< T, L4Re::Smart_count_cap< L4_FP_ALL_SPACES > > Shared_cap
Shared capability that implements automatic free and unmap of the capability selector.
Definition shared_cap:33
Shared_cap< T > make_shared_cap(L4Re::Cap_alloc *ca)
Allocate a capability slot and wrap it in a Shared_cap.
Definition shared_cap:50
Cap< T > make_cap(L4::Cap< T > cap, unsigned rights) noexcept
Make an L4::Ipc::Cap<T> for the given capability and rights.
Definition ipc_types:785
Cap< T > make_cap_rw(L4::Cap< T > cap) noexcept
Make an L4::Ipc::Cap<T> for the given capability with L4_CAP_FPAGE_RW rights.
Definition ipc_types:795
Pair implementation.
Region mapper interface.
Shared_cap / Shared_del_cap.
An area is a range of virtual addresses which is reserved, see L4Re::Rm::reserve_area().
Definition rm:699
@ RWX
Readable, writable and executable region.
Definition rm:143
@ RW
Readable and writable region.
Definition rm:139
@ X
Executable region.
Definition rm:137
@ R
Readable region.
Definition rm:133
@ Detach_free
Free the portion of the data space after detach.
Definition rm:148
@ W
Writable region.
Definition rm:135
@ Search_addr
Search for a suitable address range.
Definition rm:113
@ In_area
Search only in area, or map into area.
Definition rm:115
Low-level assert implementation.
#define l4_assert(expr)
Low-level assert.
Definition assert.h:32