From f7d88d4d6f77528aaad8f7840b0b33f1d99add85 Mon Sep 17 00:00:00 2001 From: Philip Meulengracht Date: Fri, 19 May 2023 23:06:10 +0200 Subject: [PATCH] many: fix several memory issues, fix issue with filesystem transactions, fix issues in SHM buffers when exporting buffers with weird offsets --- kernel/handle.c | 232 +++++++++++++------------- kernel/include/handle.h | 4 +- kernel/memory/heap.c | 12 +- kernel/memory/ms_shm.c | 9 +- kernel/memory/ms_shm_test.c | 111 +++++++++++- kernel/sync/mutex.c | 2 + librt/libc/io/open.c | 66 ++------ librt/libc/stdio/fread.c | 2 +- librt/libc/stdio/fwrite.c | 3 +- librt/libddk/include/ddk/barrier.h | 2 +- librt/libos/include/os/shm.h | 12 ++ librt/libos/shm.c | 22 +++ modules/filesystems/common/requests.c | 36 +++- modules/filesystems/common/storage.c | 2 +- modules/filesystems/mfs/bucket_map.c | 27 ++- modules/filesystems/mfs/main.c | 19 ++- modules/filesystems/mfs/records.c | 15 +- modules/filesystems/mfs/utilities.c | 4 +- services/filed/vfs/handlers/unlink.c | 43 +++-- services/filed/vfs/utils.c | 24 +-- services/filed/vfs/vfs.c | 6 +- services/served/server/install.c | 22 ++- 22 files changed, 411 insertions(+), 264 deletions(-) diff --git a/kernel/handle.c b/kernel/handle.c index e053c7eb3..679a12d6e 100644 --- a/kernel/handle.c +++ b/kernel/handle.c @@ -29,31 +29,27 @@ #include #include #include -#include #include -struct resource_handle { - uuid_t id; - HandleType_t type; - unsigned int flags; - mstring_t* path; - void* resource; - int references; - HandleDestructorFn destructor; +// Handle is being destroyed +#define __HANDLE_FLAG_DESTROYING 0x1 + +struct ResourceHandle { + element_t QueueHeader; + uuid_t ID; + HandleType_t Type; + unsigned int Flags; + mstring_t* Path; + void* Resource; + int References; + HandleDestructorFn Destructor; }; -struct handle_mapping { +struct HandleMapping { mstring_t* path; uuid_t handle; }; -struct handle_cleanup { - element_t header; - mstring_t* path; - void* resource; - HandleDestructorFn destructor; -}; - _Noreturn static void HandleJanitorThread(void* arg); static uint64_t mapping_hash(const void* element); static int mapping_cmp(const void* element1, const void* element2); @@ -72,10 +68,10 @@ oserr_t InitializeHandles(void) { hashtable_construct(&g_handlemappings, HASHTABLE_MINIMUM_CAPACITY, - sizeof(struct handle_mapping), mapping_hash, + sizeof(struct HandleMapping), mapping_hash, mapping_cmp); hashtable_construct(&g_handles, HASHTABLE_MINIMUM_CAPACITY, - sizeof(struct resource_handle), handle_hash, + sizeof(struct ResourceHandle), handle_hash, handle_cmp); SpinlockConstruct(&g_handlesLock); atomic_store(&g_nextHandleId, 1); @@ -91,35 +87,39 @@ InitializeHandleJanitor(void) &g_janitorHandle); } -static inline struct resource_handle* -LookupSafeHandleInstance( - _In_ uuid_t handleId) +static inline struct ResourceHandle* +__LookupSafe( + _In_ uuid_t ID) { - struct resource_handle* handle; + struct ResourceHandle* handle; if (!atomic_load(&g_nextHandleId)) { return NULL; } - SpinlockAcquireIrq(&g_handlesLock); - handle = hashtable_get(&g_handles, &(struct resource_handle) { .id = handleId }); - SpinlockReleaseIrq(&g_handlesLock); + handle = hashtable_get(&g_handles, &(struct ResourceHandle) { .ID = ID }); + if (handle == NULL || (handle->Flags & __HANDLE_FLAG_DESTROYING)) { + return NULL; + } return handle; } -static struct resource_handle* +static struct ResourceHandle* __AcquireHandle( _In_ uuid_t handleId) { - struct resource_handle* handle; + struct ResourceHandle* handle; if 
(!atomic_load(&g_nextHandleId)) { return NULL; } SpinlockAcquireIrq(&g_handlesLock); - handle = hashtable_get(&g_handles, &(struct resource_handle) { .id = handleId }); - if (handle) { - handle->references++; + handle = hashtable_get(&g_handles, &(struct ResourceHandle) { .ID = handleId }); + if (handle == NULL || (handle->Flags & __HANDLE_FLAG_DESTROYING)) { + SpinlockReleaseIrq(&g_handlesLock); + return NULL; } + + handle->References++; SpinlockReleaseIrq(&g_handlesLock); return handle; } @@ -130,22 +130,23 @@ CreateHandle( _In_ HandleDestructorFn destructor, _In_ void* resource) { - struct resource_handle handle; - void* existing; - - handle.id = atomic_fetch_add(&g_nextHandleId, 1); - handle.type = handleType; - handle.path = NULL; - handle.resource = resource; - handle.destructor = destructor; - handle.references = 1; - handle.flags = 0; + struct ResourceHandle handle; + void* existing; + + ELEMENT_INIT(&handle.QueueHeader, NULL, NULL); + handle.ID = atomic_fetch_add(&g_nextHandleId, 1); + handle.Type = handleType; + handle.Path = NULL; + handle.Resource = resource; + handle.Destructor = destructor; + handle.References = 1; + handle.Flags = 0; SpinlockAcquireIrq(&g_handlesLock); existing = hashtable_set(&g_handles, &handle); assert(existing == NULL); SpinlockReleaseIrq(&g_handlesLock); - return handle.id; + return handle.ID; } oserr_t @@ -153,13 +154,15 @@ AcquireHandle( _In_ uuid_t handleId, _Out_ void** resourceOut) { - struct resource_handle* handle = __AcquireHandle(handleId); + struct ResourceHandle* handle; + + handle = __AcquireHandle(handleId); if (!handle) { return OS_ENOENT; } if (resourceOut) { - *resourceOut = handle->resource; + *resourceOut = handle->Resource; } return OS_EOK; } @@ -170,22 +173,22 @@ AcquireHandleOfType( _In_ HandleType_t handleType, _Out_ void** resourceOut) { - struct resource_handle* handle; + struct ResourceHandle* handle; handle = __AcquireHandle(handleId); if (!handle) { return OS_ENOENT; } - if (handle->type != handleType) { + if (handle->Type != handleType) { ERROR("AcquireHandleOfType requested handle type %u, but handle was of type %u", - handleType, handle->type); + handleType, handle->Type); DestroyHandle(handleId); return OS_EUNKNOWN; } if (resourceOut) { - *resourceOut = handle->resource; + *resourceOut = handle->Resource; } return OS_EOK; } @@ -195,10 +198,10 @@ RegisterHandlePath( _In_ uuid_t handleId, _In_ const char* path) { - struct resource_handle* handle; - struct handle_mapping* mapping; + struct ResourceHandle* handle; + struct HandleMapping* mapping; mstring_t* internalPath; - DEBUG("[handle_register_path] %u => %s", handleId, path); + DEBUG("RegisterHandlePath(id=%u, path=%s)", handleId, path); internalPath = mstr_new_u8(path); @@ -208,20 +211,20 @@ RegisterHandlePath( } SpinlockAcquireIrq(&g_handlesLock); - handle = hashtable_get(&g_handles, &(struct resource_handle) { .id = handleId }); - if (!handle) { + handle = __LookupSafe(handleId); + if (handle == NULL) { SpinlockReleaseIrq(&g_handlesLock); mstr_delete(internalPath); return OS_ENOENT; } - if (handle->path) { + if (handle->Path) { SpinlockReleaseIrq(&g_handlesLock); mstr_delete(internalPath); return OS_EUNKNOWN; } - mapping = hashtable_get(&g_handlemappings, &(struct handle_mapping) { .path = internalPath }); + mapping = hashtable_get(&g_handlemappings, &(struct HandleMapping) { .path = internalPath }); if (mapping) { SpinlockReleaseIrq(&g_handlesLock); mstr_delete(internalPath); @@ -229,8 +232,8 @@ RegisterHandlePath( } // store the new mapping, and update the handle 
instance - hashtable_set(&g_handlemappings, &(struct handle_mapping) { .path = internalPath, .handle = handleId }); - handle->path = internalPath; + hashtable_set(&g_handlemappings, &(struct HandleMapping) { .path = internalPath, .handle = handleId }); + handle->Path = internalPath; SpinlockReleaseIrq(&g_handlesLock); return OS_EOK; @@ -241,9 +244,9 @@ LookupHandleByPath( _In_ const char* path, _Out_ uuid_t* handleOut) { - struct handle_mapping* mapping; - mstring_t* internalPath; - TRACE("[handle_lookup_by_path] %s", path); + struct HandleMapping* mapping; + mstring_t* internalPath; + TRACE("LookupHandleByPath(%s)", path); internalPath = mstr_new_u8(path); @@ -253,7 +256,7 @@ LookupHandleByPath( } SpinlockAcquireIrq(&g_handlesLock); - mapping = hashtable_get(&g_handlemappings, &(struct handle_mapping) { .path = internalPath }); + mapping = hashtable_get(&g_handlemappings, &(struct HandleMapping) { .path = internalPath }); if (mapping && handleOut) { *handleOut = mapping->handle; } @@ -264,78 +267,77 @@ LookupHandleByPath( void* LookupHandleOfType( - _In_ uuid_t handleId, - _In_ HandleType_t handleType) + _In_ uuid_t ID, + _In_ HandleType_t type) { - struct resource_handle* handle = LookupSafeHandleInstance(handleId); - if (!handle || handle->type != handleType) { + struct ResourceHandle* handle; + void* resource; + + SpinlockAcquireIrq(&g_handlesLock); + handle = __LookupSafe(ID); + if (handle == NULL || handle->Type != type) { + SpinlockReleaseIrq(&g_handlesLock); return NULL; } - return handle->resource; -} -static void AddHandleToCleanup( - _In_ void* resource, - _In_ HandleDestructorFn dctor, - _In_ mstring_t* path) -{ - struct handle_cleanup* cleanup; - - cleanup = kmalloc(sizeof(struct handle_cleanup)); - assert(cleanup != NULL); - - ELEMENT_INIT(&cleanup->header, NULL, cleanup); - cleanup->path = path; - cleanup->resource = resource; - cleanup->destructor = dctor; - - queue_push(&g_cleanQueue, &cleanup->header); - SemaphoreSignal(&g_eventHandle, 1); + resource = handle->Resource; + SpinlockReleaseIrq(&g_handlesLock); + return resource; } oserr_t DestroyHandle( _In_ uuid_t handleId) { - struct resource_handle* handle; - void* resource; - HandleDestructorFn dctor; - mstring_t* path; + struct ResourceHandle* handle; SpinlockAcquireIrq(&g_handlesLock); - handle = hashtable_get(&g_handles, &(struct resource_handle) { .id = handleId }); - if (!handle) { + handle = __LookupSafe(handleId); + if (handle == NULL) { SpinlockReleaseIrq(&g_handlesLock); return OS_ENOENT; } // do nothing if there still is active handles - handle->references--; - if (handle->references) { + handle->References--; + if (handle->References) { SpinlockReleaseIrq(&g_handlesLock); return OS_EINCOMPLETE; } - // store some resources before releaseing lock - resource = handle->resource; - dctor = handle->destructor; - path = handle->path; - if (path) { - hashtable_remove(&g_handlemappings, &(struct handle_mapping) { .path = path }); + // mark handle for destruction + handle->Flags |= __HANDLE_FLAG_DESTROYING; + if (handle->Path) { + hashtable_remove(&g_handlemappings, &(struct HandleMapping) { .path = handle->Path }); } - hashtable_remove(&g_handles, &(struct resource_handle) { .id = handleId }); SpinlockReleaseIrq(&g_handlesLock); - AddHandleToCleanup(resource, dctor, path); + queue_push(&g_cleanQueue, &handle->QueueHeader); + SemaphoreSignal(&g_eventHandle, 1); return OS_EOK; } +static void +__CleanupHandle( + _In_ struct ResourceHandle* handle) +{ + if (handle->Destructor) { + handle->Destructor(handle->Resource); + } 
+ if (handle->Path) { + mstr_delete(handle->Path); + } + + SpinlockAcquireIrq(&g_handlesLock); + hashtable_remove(&g_handles, &(struct ResourceHandle) { .ID = handle->ID }); + SpinlockReleaseIrq(&g_handlesLock); +} + _Noreturn static void HandleJanitorThread( _In_Opt_ void* arg) { - element_t* element; - struct handle_cleanup* cleanup; + element_t* element; _CRT_UNUSED(arg); for (;;) { @@ -343,15 +345,7 @@ HandleJanitorThread( element = queue_pop(&g_cleanQueue); while (element) { - cleanup = (struct handle_cleanup*)element->value; - if (cleanup->destructor) { - cleanup->destructor(cleanup->resource); - } - if (cleanup->path) { - mstr_delete(cleanup->path); - } - kfree(cleanup); - + __CleanupHandle((struct ResourceHandle*)element); element = queue_pop(&g_cleanQueue); } } @@ -359,28 +353,28 @@ HandleJanitorThread( static uint64_t mapping_hash(const void* element) { - const struct handle_mapping* entry = element; + const struct HandleMapping* entry = element; return mstr_hash(entry->path); // already unique identifier } static int mapping_cmp(const void* element1, const void* element2) { - const struct handle_mapping* lh = element1; - const struct handle_mapping* rh = element2; + const struct HandleMapping* lh = element1; + const struct HandleMapping* rh = element2; return mstr_cmp(lh->path, rh->path); } static uint64_t handle_hash(const void* element) { - const struct resource_handle* entry = element; - return entry->id; // already unique identifier + const struct ResourceHandle* entry = element; + return entry->ID; // already unique identifier } static int handle_cmp(const void* element1, const void* element2) { - const struct resource_handle* lh = element1; - const struct resource_handle* rh = element2; + const struct ResourceHandle* lh = element1; + const struct ResourceHandle* rh = element2; // return 0 on true, 1 on false - return lh->id == rh->id ? 0 : 1; + return lh->ID == rh->ID ? 0 : 1; } diff --git a/kernel/include/handle.h b/kernel/include/handle.h index d7683c9ab..298f34e5b 100644 --- a/kernel/include/handle.h +++ b/kernel/include/handle.h @@ -110,7 +110,7 @@ AcquireHandleOfType( */ KERNELAPI void* KERNELABI LookupHandleOfType( - _In_ uuid_t handleId, - _In_ HandleType_t handleType); + _In_ uuid_t ID, + _In_ HandleType_t type); #endif //! 
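The handle.c rework above folds the old handle_cleanup allocation into the handle itself: DestroyHandle marks the entry with __HANDLE_FLAG_DESTROYING while the spinlock is held and pushes the handle's own QueueHeader onto the cleanup queue, and the janitor thread runs the destructor outside the lock before finally removing the hashtable entry. A minimal sketch of that pattern follows; the types, field names and list handling are simplified stand-ins, not the kernel's actual APIs.

// Sketch of the deferred-destruction scheme (illustrative, not kernel code).
#include <stddef.h>

#define FLAG_DESTROYING 0x1

struct handle {
    struct handle* Next;        // plays the role of QueueHeader
    unsigned int   Flags;
    int            References;
    void         (*Destructor)(void*);
    void*          Resource;
};

// Lookups skip dying handles so nobody is handed a half-torn-down resource.
static struct handle* lookup(struct handle* h)
{
    if (h == NULL || (h->Flags & FLAG_DESTROYING)) {
        return NULL;
    }
    return h;
}

// Called with the table lock held; returns 1 if the janitor must be signalled.
static int release(struct handle* h, struct handle** cleanupList)
{
    if (--h->References > 0) {
        return 0;
    }
    h->Flags |= FLAG_DESTROYING;    // from now on lookups and acquires fail
    h->Next = *cleanupList;         // no separate cleanup record is allocated
    *cleanupList = h;
    return 1;
}

// Janitor side, runs without the lock so destructors are free to block.
static void drain(struct handle** cleanupList)
{
    while (*cleanupList != NULL) {
        struct handle* h = *cleanupList;
        *cleanupList = h->Next;
        if (h->Destructor) {
            h->Destructor(h->Resource);
        }
        // only now is the entry removed from the table and its memory reclaimed
    }
}

Compared with the removed AddHandleToCleanup path, this shape also drops a kmalloc on the destruction path.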
__HANDLE_H__ diff --git a/kernel/memory/heap.c b/kernel/memory/heap.c index ebf5596cc..720cf3e13 100644 --- a/kernel/memory/heap.c +++ b/kernel/memory/heap.c @@ -720,8 +720,7 @@ MemoryCacheAllocate( if (Slab->NumberOfFreeObjects == 1) { list_remove(&Cache->PartialSlabs, Element); } - } - else { + } else { Element = list_front(&Cache->FreeSlabs); assert(Element != NULL); @@ -741,8 +740,7 @@ MemoryCacheAllocate( Cache->NumberOfFreeObjects--; Allocated = MEMORY_SLAB_ELEMENT(Cache, Slab, Index); - } - else if (!(Cache->Flags & HEAP_SINGLE_SLAB)) { + } else if (!(Cache->Flags & HEAP_SINGLE_SLAB)) { Slab = __SlabCreate(Cache); if (!Slab) { MutexUnlock(&Cache->SyncObject); @@ -755,15 +753,13 @@ MemoryCacheAllocate( if (!Slab->NumberOfFreeObjects) { list_append(&Cache->FullSlabs, &Slab->Header); - } - else { + } else { list_append(&Cache->PartialSlabs, &Slab->Header); Cache->NumberOfFreeObjects += (Cache->ObjectCount - 1); } Allocated = MEMORY_SLAB_ELEMENT(Cache, Slab, Index); - } - else { + } else { ERROR("[heap] [%s] ran out of objects %i/%i", Cache->Name, Cache->NumberOfFreeObjects, Cache->ObjectCount); Allocated = NULL; diff --git a/kernel/memory/ms_shm.c b/kernel/memory/ms_shm.c index 8da48978f..12859da28 100644 --- a/kernel/memory/ms_shm.c +++ b/kernel/memory/ms_shm.c @@ -1038,7 +1038,7 @@ __UpdateMapping( ); } -oserr_t +static oserr_t __CreateMapping( _In_ struct SHMBuffer* shmBuffer, _In_ size_t offset, @@ -1073,7 +1073,11 @@ __CreateMapping( &(struct MemorySpaceMapOptions) { .SHMTag = shmBuffer->ID, .Pages = &shmBuffer->Pages[pageIndex], - .Length = length, + // Ensure that the length we are requesting pages for actually cover + // the asked mapping range. If we are requesting a mapping from 0xFFF0 + // and 32 bytes, we will do a page-crossing, and actually we are requesting + // the entire first page. 
+ .Length = (length + (actualOffset % pageSize)), .Mask = shmBuffer->PageMask, .Flags = __RecalculateFlags(shmBuffer->Flags, flags), .PlacementFlags = MAPPING_PHYSICAL_FIXED | MAPPING_VIRTUAL_PROCESS @@ -1137,6 +1141,7 @@ SHMMap( if (oserr != OS_EOK) { return oserr; } + TRACE("SHMMap: mapping: 0x%p\n", mapping); // Increase the mapping with the built-in offset // if this was an exported buffer diff --git a/kernel/memory/ms_shm_test.c b/kernel/memory/ms_shm_test.c index 9092992b0..723629c85 100644 --- a/kernel/memory/ms_shm_test.c +++ b/kernel/memory/ms_shm_test.c @@ -851,7 +851,7 @@ void TestSHMConform_IsConformed(void** state) // needs to contain the expected setup for the virtual region g_testContext.MemorySpaceMap.Calls[0].ExpectedSHMTag = 1; // expect 1 g_testContext.MemorySpaceMap.Calls[0].CheckSHMTag = true; - g_testContext.MemorySpaceMap.Calls[0].ExpectedLength = 0x8400; + g_testContext.MemorySpaceMap.Calls[0].ExpectedLength = (0x8400 + 0x856); g_testContext.MemorySpaceMap.Calls[0].CheckLength = true; g_testContext.MemorySpaceMap.Calls[0].ExpectedFlags = MAPPING_PERSISTENT | MAPPING_USERSPACE | MAPPING_READONLY; g_testContext.MemorySpaceMap.Calls[0].CheckFlags = true; @@ -1678,6 +1678,112 @@ void TestSHMMap_Simple(void** state) TeardownTest(state); } +static void __CreateExportedSHM(SHMHandle_t* shm, const void* buffer, size_t size) +{ + int pageCount; + paddr_t* pages; + oserr_t oserr; + paddr_t startAddress = 0x1000000; + + // Create page array + pageCount = (int)((size + (GetMemorySpacePageSize() - 1)) / GetMemorySpacePageSize()); + pages = test_malloc(pageCount * sizeof(paddr_t)); + assert_non_null(pages); + + for (int i = 0; i < pageCount; i++) { + pages[i] = startAddress + (i * GetMemorySpacePageSize()); + } + + // The following function calls are expected during normal + // creation: + + // 1. CreateHandle, nothing really we care enough to check here except + // that it gets invoked as expected. The default returned value is 1 + + // 2. GetMemorySpaceMapping + g_testContext.GetMemorySpaceMapping.ExpectedAddress = (vaddr_t)buffer; + g_testContext.GetMemorySpaceMapping.CheckAddress = true; + g_testContext.GetMemorySpaceMapping.ExpectedPageCount = pageCount; + g_testContext.GetMemorySpaceMapping.CheckPageCount = true; + g_testContext.GetMemorySpaceMapping.PageValues = pages; + g_testContext.GetMemorySpaceMapping.PageValuesProvided = true; + g_testContext.GetMemorySpaceMapping.ReturnValue = OS_EOK; + + oserr = SHMExport( + (void*)buffer, + size, + 0, + 0, + shm + ); + + // free resources before asserting + test_free(pages); + assert_int_equal(oserr, OS_EOK); +} + +void TestSHMMap_SimpleExported(void** state) +{ + oserr_t oserr; + SHMHandle_t shm; + void* buffer; + int sgCount; + SHMSG_t* sg; + + // Allocate a new buffer, with weird values + buffer = (void*)0x109586; + + // Create a normal buffer we can use. It must not be committed. The SHM + // is already filled, which we don't want as we are trying to make a separate + // mapping that is not the original. + __CreateExportedSHM(&shm, buffer, 0xC888); + + // Ensure new mapping + shm.Buffer = NULL; + + // 1. LookupHandleOfType + g_testContext.LookupHandleOfType.ReturnValue = g_testContext.CreateHandle.Calls[0].CreatedResource; + + // 2. 
MemorySpaceMap, this is the most interesting call to check, as that + // needs to contain the expected setup for the virtual region + g_testContext.MemorySpaceMap.Calls[0].ExpectedSHMTag = 1; // expect 1 + g_testContext.MemorySpaceMap.Calls[0].CheckSHMTag = true; + g_testContext.MemorySpaceMap.Calls[0].ExpectedLength = (0xC888 + 0x586); + g_testContext.MemorySpaceMap.Calls[0].CheckLength = true; + g_testContext.MemorySpaceMap.Calls[0].ExpectedFlags = MAPPING_PERSISTENT | MAPPING_USERSPACE; + g_testContext.MemorySpaceMap.Calls[0].CheckFlags = true; + g_testContext.MemorySpaceMap.Calls[0].ExpectedPlacement = MAPPING_VIRTUAL_PROCESS | MAPPING_PHYSICAL_FIXED; + g_testContext.MemorySpaceMap.Calls[0].CheckPlacement = true; + g_testContext.MemorySpaceMap.Calls[0].ReturnedMapping = 0x20000; + g_testContext.MemorySpaceMap.Calls[0].ReturnedMappingProvided = true; + g_testContext.MemorySpaceMap.Calls[0].ReturnValue = OS_EOK; + oserr = SHMMap( + &shm, + 0, + shm.Capacity, + SHM_ACCESS_READ | SHM_ACCESS_WRITE + ); + assert_int_equal(oserr, OS_EOK); + assert_ptr_equal(shm.Buffer, (0x20000 + 0x586)); + assert_int_equal(shm.Length, 0xC888); + + // Ensure that the SG entry shows up correctly + oserr = SHMBuildSG(shm.ID, &sgCount, NULL); + assert_int_equal(oserr, OS_EOK); + assert_int_equal(sgCount, 1); + + sg = test_malloc(sgCount * sizeof(SHMSG_t)); + assert_non_null(sg); + + oserr = SHMBuildSG(shm.ID, &sgCount, sg); + assert_int_equal(oserr, OS_EOK); + assert_int_equal(sg[0].Address, (0x1000000 + 0x586)); + assert_int_equal(sg[0].Length, 0xCA7A); + + test_free(sg); + TeardownTest(state); +} + void TestSHMMap_CanCommit(void** state) { oserr_t oserr; @@ -1741,7 +1847,7 @@ void TestSHMMap_CanRemap(void** state) // 2. MemorySpaceMap, expect a new mapping of 0x3000 when we map from page 2-4 g_testContext.MemorySpaceMap.Calls[1].ExpectedSHMTag = 1; // expect 1 g_testContext.MemorySpaceMap.Calls[1].CheckSHMTag = true; - g_testContext.MemorySpaceMap.Calls[1].ExpectedLength = 0x2890; + g_testContext.MemorySpaceMap.Calls[1].ExpectedLength = 0x3000; g_testContext.MemorySpaceMap.Calls[1].CheckLength = true; g_testContext.MemorySpaceMap.Calls[1].ExpectedFlags = MAPPING_PERSISTENT | MAPPING_USERSPACE; g_testContext.MemorySpaceMap.Calls[1].CheckFlags = true; @@ -1797,6 +1903,7 @@ int main(void) cmocka_unit_test_setup(TestSHMAttach_PrivateFailed, SetupTest), cmocka_unit_test_setup(TestSHMAttach_InvalidID, SetupTest), cmocka_unit_test_setup(TestSHMMap_Simple, SetupTest), + cmocka_unit_test_setup(TestSHMMap_SimpleExported, SetupTest), cmocka_unit_test_setup(TestSHMMap_CanCommit, SetupTest), cmocka_unit_test_setup(TestSHMMap_CanRemap, SetupTest), }; diff --git a/kernel/sync/mutex.c b/kernel/sync/mutex.c index 32040c24c..5f5ea3493 100644 --- a/kernel/sync/mutex.c +++ b/kernel/sync/mutex.c @@ -102,6 +102,8 @@ __SlowLock( irqstate_t intStatus; uuid_t owner; + // TODO: Detect mutex locking during IRQs + // Disable interrupts and try to acquire the lock or wait for the lock // to unlock if it's held on another CPU - however we only wait for a brief period intStatus = InterruptDisable(); diff --git a/librt/libc/io/open.c b/librt/libc/io/open.c index ab484742c..b9861b4d4 100644 --- a/librt/libc/io/open.c +++ b/librt/libc/io/open.c @@ -15,7 +15,7 @@ * along with this program. If not, see . 
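The constants asserted in TestSHMMap_SimpleExported above fall straight out of the page arithmetic that __CreateMapping now applies to exported buffers with an in-page offset; the same arithmetic explains the (0x8400 + 0x856) expectation added to TestSHMConform_IsConformed. A small standalone check of the numbers, assuming the 0x1000-byte page size implied by the test fixture:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
    const size_t    pageSize = 0x1000;    // assumed 4 KiB pages, as the fixture implies
    const uintptr_t user     = 0x109586;  // exported user pointer from the test
    const size_t    size     = 0xC888;    // exported length from the test

    size_t inPageOffset = (size_t)(user % pageSize);          // 0x586
    size_t mapLength    = size + inPageOffset;                // what MemorySpaceMap must cover
    size_t pageCount    = (size + pageSize - 1) / pageSize;   // pages backing the export

    assert(inPageOffset == 0x586);
    assert(mapLength == 0xC888 + 0x586);                      // ExpectedLength in the test
    // The first SG entry starts at the first physical page plus the in-page
    // offset and covers the backing pages minus that offset.
    assert(pageCount * pageSize - inPageOffset == 0xCA7A);
    return 0;
}

The caller-visible pointer follows the same rule, which is why the test expects shm.Buffer to land at 0x20000 + 0x586 rather than at the start of the fresh mapping.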
*/ -#define __TRACE +//#define __TRACE #define __need_minmax #include #include @@ -152,7 +152,7 @@ __transfer( _In_ size_t chunkSize, _In_ size_t offset, _In_ size_t length, - _Out_ size_t* bytesTransferreOut) + _Out_ size_t* bytesTransferedOut) { size_t bytesLeft = length; oserr_t oserr; @@ -182,7 +182,7 @@ __transfer( offset += bytesTransferred; } - *bytesTransferreOut = length - bytesLeft; + *bytesTransferedOut = length - bytesLeft; return oserr; } @@ -194,32 +194,14 @@ __read_large( _Out_ size_t* bytesReadOut) { OSHandle_t shm; - void* adjustedPointer = (void*)buffer; - size_t adjustedLength = length; - size_t pageSize = MemoryPageSize(); oserr_t oserr; TRACE("__read_large(buffer=0x%" PRIxIN ", length=%" PRIuIN ")", buffer, length); - // enforce page alignment on the buffer - if ((uintptr_t)buffer & (pageSize - 1)) { - size_t bytesToAlign = pageSize - ((uintptr_t)buffer & (pageSize - 1)); - TRACE("__read_large: aligning buffer=0x%" PRIxIN ", align=0x%" PRIxIN, - buffer, bytesToAlign); - oserr = __file_read(handle, buffer, bytesToAlign, bytesReadOut); - if (oserr != OS_EOK || *bytesReadOut == 0) { - return oserr; - } - adjustedPointer = (void*)((uintptr_t)buffer + bytesToAlign); - adjustedLength -= bytesToAlign; - } - - TRACE("__read_large: exporting buffer=0x%" PRIxIN ", length=0x%" PRIxIN, - adjustedPointer, adjustedLength); oserr = SHMExport( - adjustedPointer, + buffer, &(SHM_t) { .Access = SHM_ACCESS_READ | SHM_ACCESS_WRITE, - .Size = adjustedLength + .Size = length }, &shm ); @@ -232,15 +214,11 @@ __read_large( handle->OSHandle.ID, shm.ID, false, - adjustedLength, + length, 0, - adjustedLength, + length, bytesReadOut ); - if (*bytesReadOut == adjustedLength) { - *bytesReadOut = length; - } - OSHandleDestroy(&shm); return oserr; } @@ -290,32 +268,13 @@ __write_large( _Out_ size_t* bytesWrittenOut) { OSHandle_t shm; - void* adjustedPointer = (void*)buffer; - size_t adjustedLength = length; - size_t pageSize = MemoryPageSize(); oserr_t oserr; TRACE("__write_large(buffer=0x%" PRIxIN ", length=%" PRIuIN ")", buffer, length); - - // enforce page alignment on the buffer - if ((uintptr_t)buffer & (pageSize - 1)) { - size_t bytesToAlign = pageSize - ((uintptr_t)buffer & (pageSize - 1)); - TRACE("__write_large: aligning buffer=0x%" PRIxIN ", align=0x%" PRIxIN, - buffer, bytesToAlign); - oserr = __file_write(handle, buffer, bytesToAlign, bytesWrittenOut); - if (oserr != OS_EOK|| *bytesWrittenOut == 0) { - return oserr; - } - adjustedPointer = (void*)((uintptr_t)buffer + bytesToAlign); - adjustedLength -= bytesToAlign; - } - - TRACE("__write_large: exporting buffer=0x%" PRIxIN ", length=0x%" PRIxIN, - adjustedPointer, adjustedLength); oserr = SHMExport( - adjustedPointer, + (void*)buffer, &(SHM_t) { .Access = SHM_ACCESS_READ | SHM_ACCESS_WRITE, - .Size = adjustedLength + .Size = length }, &shm ); @@ -326,15 +285,12 @@ __write_large( oserr = __transfer( handle->OSHandle.ID, shm.ID, true, - adjustedLength, + length, 0, - adjustedLength, + length, bytesWrittenOut ); OSHandleDestroy(&shm); - if (*bytesWrittenOut == adjustedLength) { - *bytesWrittenOut = length; - } return oserr; } diff --git a/librt/libc/stdio/fread.c b/librt/libc/stdio/fread.c index 1f7005bc2..d10011185 100644 --- a/librt/libc/stdio/fread.c +++ b/librt/libc/stdio/fread.c @@ -15,8 +15,8 @@ * along with this program. If not, see . 
*/ +//#define __TRACE #define __need_minmax -#define __TRACE #include #include #include diff --git a/librt/libc/stdio/fwrite.c b/librt/libc/stdio/fwrite.c index e436dfc04..f8a614195 100644 --- a/librt/libc/stdio/fwrite.c +++ b/librt/libc/stdio/fwrite.c @@ -15,9 +15,8 @@ * along with this program. If not, see . */ +//#define __TRACE #define __need_minmax -#define __TRACE - #include #include #include diff --git a/librt/libddk/include/ddk/barrier.h b/librt/libddk/include/ddk/barrier.h index b905e3da0..cd5658b53 100644 --- a/librt/libddk/include/ddk/barrier.h +++ b/librt/libddk/include/ddk/barrier.h @@ -78,7 +78,7 @@ /////////////////////////////////////////////////////////////////////////////// //// Clang /////////////////////////////////////////////////////////////////////////////// -#elif defined(__clang__) +#elif defined(__clang__) || defined(__GNUC__) #define sw_mb() __asm__ __volatile__ ( "" ::: "memory" ) #define sw_rmb() __asm__ __volatile__ ( "" ::: "memory" ) #define sw_wmb() __asm__ __volatile__ ( "" ::: "memory" ) diff --git a/librt/libos/include/os/shm.h b/librt/libos/include/os/shm.h index 3396318c9..04498254a 100644 --- a/librt/libos/include/os/shm.h +++ b/librt/libos/include/os/shm.h @@ -158,6 +158,18 @@ CRTDECL(size_t, SHMBufferCapacity( _In_ OSHandle_t* handle)); +/** + * @brief Returns the current offset in use for the shared memory buffer handle. + * The value returned is only valid for buffers that have been mapped, or exported. + * That means either SHMMap, SHMConform or SHMExport must have been called on this + * handle. + * @param[In] handle The OS handle that represents a SHM buffer. + * @return The current offset into the underlying buffer region this mapping represents. + */ +CRTDECL(size_t, +SHMBufferOffset( + _In_ OSHandle_t* handle)); + /** * Call this once with the count parameter to get the number of * scatter-gather entries, then the second time with the memory_sg parameter diff --git a/librt/libos/shm.c b/librt/libos/shm.c index e8ce6ef20..e676e963b 100644 --- a/librt/libos/shm.c +++ b/librt/libos/shm.c @@ -303,6 +303,28 @@ SHMBufferLength( return ((SHMHandle_t*)handle->Payload)->Length; } +size_t +SHMBufferOffset( + _In_ OSHandle_t* handle) +{ + SHMHandle_t* shm; + if (handle == NULL || handle->Payload == NULL) { + return 0; + } + // The offset in the handle can mean two different things, either + // it means the actual offset the buffer was mapped with, or it means + // the offset into the source buffer. (Conforming buffers use this). + // This means that conformed buffers always have an effective offset of + // 0, so handle this correctly on a userspace level. + shm = handle->Payload; + if (shm->SourceID != UUID_INVALID) { + // The buffer is a conformed buffer, they *always* have effective + // offsets of 0. 
+ return 0; + } + return shm->Offset; +} + size_t SHMBufferCapacity( _In_ OSHandle_t* handle) diff --git a/modules/filesystems/common/requests.c b/modules/filesystems/common/requests.c index 27acb34de..cb21f1ad3 100644 --- a/modules/filesystems/common/requests.c +++ b/modules/filesystems/common/requests.c @@ -251,6 +251,7 @@ static inline oserr_t __MapUserBufferRead( _In_ struct FSBaseContext* fsBaseContext, _In_ uuid_t handle, + _In_ size_t offset, _In_ size_t length, _In_ OSHandle_t* shm) { @@ -262,7 +263,7 @@ __MapUserBufferRead( }, SHM_CONFORM_BACKFILL_ON_UNMAP, SHM_ACCESS_READ | SHM_ACCESS_WRITE, - 0, + offset, length, shm ); @@ -276,7 +277,13 @@ void ctt_filesystem_read_invocation(struct gracht_message* message, const uintpt size_t read; TRACE("ctt_filesystem_read_invocation()"); - oserr = __MapUserBufferRead((void*)fsctx, params->buffer_id, (size_t)params->count, &shm); + oserr = __MapUserBufferRead( + (void*)fsctx, + params->buffer_id, + (size_t)params->offset, + (size_t)params->count, + &shm + ); if (oserr != OS_EOK) { ctt_filesystem_read_response(message, oserr, 0); return; @@ -285,9 +292,12 @@ void ctt_filesystem_read_invocation(struct gracht_message* message, const uintpt oserr = FsRead( (void*)fsctx, (void*)fctx, - params->buffer_id, + // Important (!): use the effective buffer values here, as they may + // have changed after calling SHMConform. We can no longer assume that + // this is still the original buffer. + shm.ID, SHMBuffer(&shm), - params->offset, + SHMBufferOffset(&shm), (size_t)params->count, // TODO: be consistent with size units &read ); @@ -309,6 +319,7 @@ static inline oserr_t __MapUserBufferWrite( _In_ struct FSBaseContext* fsBaseContext, _In_ uuid_t handle, + _In_ size_t offset, _In_ size_t length, _In_ OSHandle_t* shm) { @@ -320,7 +331,7 @@ __MapUserBufferWrite( }, SHM_CONFORM_FILL_ON_CREATION, SHM_ACCESS_READ, - 0, + offset, length, shm ); @@ -333,7 +344,13 @@ void ctt_filesystem_write_invocation(struct gracht_message* message, const uintp size_t written; TRACE("ctt_filesystem_write_invocation()"); - oserr = __MapUserBufferWrite((void*)fsctx, params->buffer_id, (size_t)params->count, &shm); + oserr = __MapUserBufferWrite( + (void*)fsctx, + params->buffer_id, + (size_t)params->offset, + (size_t)params->count, + &shm + ); if (oserr != OS_EOK) { ctt_filesystem_write_response(message, oserr, 0); return; @@ -342,9 +359,12 @@ void ctt_filesystem_write_invocation(struct gracht_message* message, const uintp oserr = FsWrite( (void*)fsctx, (void*)fctx, - params->buffer_id, + // Important (!): use the effective buffer values here, as they may + // have changed after calling SHMConform. We can no longer assume that + // this is still the original buffer. + shm.ID, SHMBuffer(&shm), - params->offset, + SHMBufferOffset(&shm), (size_t)params->count, // TODO: be consistent with size units &written ); diff --git a/modules/filesystems/common/storage.c b/modules/filesystems/common/storage.c index 464795871..c0f50cf30 100644 --- a/modules/filesystems/common/storage.c +++ b/modules/filesystems/common/storage.c @@ -16,7 +16,7 @@ * */ -#define __TRACE +//#define __TRACE #include #include diff --git a/modules/filesystems/mfs/bucket_map.c b/modules/filesystems/mfs/bucket_map.c index 6a99c8829..9cc1abdc2 100644 --- a/modules/filesystems/mfs/bucket_map.c +++ b/modules/filesystems/mfs/bucket_map.c @@ -15,6 +15,7 @@ * along with this program. If not, see . 
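The requests.c changes above come down to one rule: once the caller's buffer has been conformed, all further I/O must go through the conformed handle (its ID, its mapped pointer, its effective offset), because it may be a completely different buffer than the one the caller named. A rough sketch of that shape follows; map_user_buffer() and fs_io() are hypothetical stand-ins for __MapUserBufferRead/Write and FsRead/FsWrite, and the include path is assumed from the tree layout above, while SHMBuffer, SHMBufferOffset and OSHandleDestroy are used exactly as in the patched code.

#include <os/shm.h>   // SHMBuffer, SHMBufferOffset, OSHandle_t et al (header path assumed)

// Hypothetical stand-ins for the real driver entry points.
extern oserr_t map_user_buffer(uuid_t id, size_t offset, size_t length, OSHandle_t* shm);
extern oserr_t fs_io(uuid_t id, void* buffer, size_t offset, size_t length, size_t* out);

static oserr_t handle_io(uuid_t callerBufferID, size_t callerOffset, size_t count)
{
    OSHandle_t shm;
    size_t     transferred;
    oserr_t    oserr;

    // May hand back a freshly allocated buffer if the caller's one did not conform.
    oserr = map_user_buffer(callerBufferID, callerOffset, count, &shm);
    if (oserr != OS_EOK) {
        return oserr;
    }

    // The caller's id/offset are no longer meaningful here: a conformed copy has an
    // effective offset of 0, while a passthrough export keeps its in-page offset,
    // which is exactly what SHMBufferOffset() reports.
    oserr = fs_io(shm.ID, SHMBuffer(&shm), SHMBufferOffset(&shm), count, &transferred);
    OSHandleDestroy(&shm);
    return oserr;
}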
*/ +//#define __TRACE #define __need_minmax #include #include @@ -24,6 +25,14 @@ #include #include +// TODO: +// - Implement bucket map transactions +// * BucketMapStartTransaction +// * BucketMapCommitTransaction +// * BucketMapAbortTransaction +// - Abstract the storage medium, multiple mediums +// - Expand buckets to 64 bit. + static oserr_t __UpdateMasterRecords( _In_ FileSystemMFS_t* mfs) @@ -31,7 +40,6 @@ __UpdateMasterRecords( size_t sectorsTransferred; oserr_t oserr; void* buffer = SHMBuffer(&mfs->TransferBuffer); - TRACE("__UpdateMasterRecords()"); memset(buffer, 0, mfs->SectorSize); @@ -74,8 +82,7 @@ MfsZeroBucket( { size_t i; size_t sectorsTransferred; - - TRACE("MfsZeroBucket(Bucket %u, Count %u)", bucket, count); + TRACE("MfsZeroBucket(bucket=%u, count=%u)", bucket, count); memset(SHMBuffer(&mfs->TransferBuffer), 0, SHMBufferLength(&mfs->TransferBuffer)); for (i = 0; i < count; i++) { @@ -127,8 +134,8 @@ MFSBucketMapSetLinkAndLength( size_t sectorOffset; size_t sectorsTransferred; oserr_t oserr; - - TRACE("MFSBucketMapSetLinkAndLength(Bucket %u, Link %u)", bucket, link->Link); + TRACE("MFSBucketMapSetLinkAndLength(bucket=%u, link=%u, length=%u)", + bucket, link, length); // Update in-memory map first mfs->BucketMap[(bucket * 2)] = link; @@ -196,12 +203,12 @@ MFSBucketMapAllocate( allocatedRecord->Length = MIN(bucketsLeft, currentRecord.Length); } - // If i is longer than the number of buckets we need to allocate, then we must + // If the record at 'i' is longer than the number of buckets we need to allocate, then we must // split up i. // i0 with the length of the number of buckets to allocate // i1 who starts from i0+number of buckets to allocate, with a length adjusted if (currentRecord.Length > bucketsLeft) { - // Update the current record to reflect it's new length and link + // Update the current record to reflect its new length and link oserr = MFSBucketMapSetLinkAndLength( mfs, i, @@ -233,6 +240,12 @@ MFSBucketMapAllocate( bucketsLeft = 0; } else if (currentRecord.Length == bucketsLeft) { bucketsLeft = 0; + + // just relink the bucket + oserr = MFSBucketMapSetLinkAndLength(mfs, i, MFS_ENDOFCHAIN, 0, false); + if (oserr != OS_EOK) { + return oserr; + } } else { // The length is less, so we need to allocate this segment and move on // to the link diff --git a/modules/filesystems/mfs/main.c b/modules/filesystems/mfs/main.c index 0b9beda52..6e1fd14be 100644 --- a/modules/filesystems/mfs/main.c +++ b/modules/filesystems/mfs/main.c @@ -144,33 +144,34 @@ FsStat( oserr_t FsUnlink( - _In_ void* instanceData, + _In_ void* instanceData, _In_ mstring_t* path) { FileSystemMFS_t* mfs = instanceData; - oserr_t osStatus; + oserr_t oserr; MFSEntry_t* mfsEntry; + TRACE("FsUnlink(%ms)", path); - osStatus = MfsLocateRecord( + oserr = MfsLocateRecord( mfs, &mfs->RootEntry, path, &mfsEntry); - if (osStatus != OS_EOK) { - return osStatus; + if (oserr != OS_EOK) { + return oserr; } - osStatus = MfsFreeBuckets(mfs, mfsEntry->StartBucket, mfsEntry->StartLength); - if (osStatus != OS_EOK) { + oserr = MfsFreeBuckets(mfs, mfsEntry->StartBucket, mfsEntry->StartLength); + if (oserr != OS_EOK) { ERROR("Failed to free the buckets at start 0x%x, length 0x%x", mfsEntry->StartBucket, mfsEntry->StartLength); goto cleanup; } - osStatus = MfsUpdateRecord(mfs, mfsEntry, MFS_ACTION_DELETE); + oserr = MfsUpdateRecord(mfs, mfsEntry, MFS_ACTION_DELETE); cleanup: MFSEntryDelete(mfsEntry); - return osStatus; + return oserr; } oserr_t diff --git a/modules/filesystems/mfs/records.c b/modules/filesystems/mfs/records.c index 
ba4714570..28e1881b3 100644 --- a/modules/filesystems/mfs/records.c +++ b/modules/filesystems/mfs/records.c @@ -21,7 +21,7 @@ * - Contains the implementation of the MFS driver for mollenos */ -#define __TRACE +//#define __TRACE #include #include @@ -211,6 +211,7 @@ __FindEntryOrFreeInDirectory( MapRecord_t link; int exitLoop = 0; + TRACE("__FindEntryOrFreeInDirectory: reading bucket %u", currentBucket); oserr = __ReadCurrentBucket(mfs, currentBucket, &link); if (oserr != OS_EOK) { ERROR("__FindEntryOrFreeInDirectory failed to read directory bucket"); @@ -505,21 +506,21 @@ MfsCreateRecord( _In_ uint32_t startLength, _Out_ MFSEntry_t** entryOut) { - oserr_t osStatus; + oserr_t oserr; MFSEntry_t nextEntry = { 0 }; TRACE("MfsCreateRecord(fileSystem=%ms, flags=0x%x, path=%ms)", mfs->Label, flags, name); - osStatus = __FindEntryOrFreeInDirectory( + oserr = __FindEntryOrFreeInDirectory( mfs, entry, name, 1, &nextEntry ); - if (osStatus == OS_ENOENT) { - osStatus = __CreateEntryInDirectory( + if (oserr == OS_ENOENT) { + oserr = __CreateEntryInDirectory( mfs, name, owner, @@ -536,6 +537,6 @@ MfsCreateRecord( ); } __CleanupEntryIterator(&nextEntry); - TRACE("MfsCreateRecord returns=%u", osStatus); - return osStatus; + TRACE("MfsCreateRecord returns=%u", oserr); + return oserr; } diff --git a/modules/filesystems/mfs/utilities.c b/modules/filesystems/mfs/utilities.c index c5b5da21d..a6ba93fb7 100644 --- a/modules/filesystems/mfs/utilities.c +++ b/modules/filesystems/mfs/utilities.c @@ -21,7 +21,7 @@ * - Contains the implementation of the MFS driver for mollenos */ -#define __TRACE +//#define __TRACE #include #include @@ -97,7 +97,7 @@ MfsUpdateRecord( oserr_t oserr; FileRecord_t* record; size_t sectorsTransferred; - TRACE("MfsUpdateEntry(File %ms)", entry->Name); + TRACE("MfsUpdateEntry(entry=%ms, action=%i)", entry->Name, action); // Read the stored data bucket where the record is oserr = FSStorageRead( diff --git a/services/filed/vfs/handlers/unlink.c b/services/filed/vfs/handlers/unlink.c index 3f11e9dbb..b684d0d30 100644 --- a/services/filed/vfs/handlers/unlink.c +++ b/services/filed/vfs/handlers/unlink.c @@ -22,7 +22,9 @@ #include #include "../private.h" -static oserr_t __VerifyCanDelete(struct VFSNode* node) +static oserr_t +__VerifyCanDelete( + _In_ struct VFSNode* node) { // We do not allow bind mounted nodes to be deleted. if (!__NodeIsRegular(node)) { @@ -35,7 +37,7 @@ static oserr_t __VerifyCanDelete(struct VFSNode* node) } // We do not allow deletion on nodes with open handles - if (node->Handles.element_size != 0) { + if (node->Handles.element_count != 0) { return OS_EBUSY; } @@ -49,7 +51,9 @@ static oserr_t __VerifyCanDelete(struct VFSNode* node) // __DeleteNode is a helper for deleting a VFS node. The node that has been // passed must not be locked by the caller, AND the caller must hold a read lock // on the parent -static oserr_t __DeleteNode(struct VFSNode* node) +static oserr_t +__DeleteNode( + _In_ struct VFSNode* node) { struct VFSOperations* ops; struct VFS* vfs; @@ -72,23 +76,26 @@ static oserr_t __DeleteNode(struct VFSNode* node) usched_rwlock_w_promote(&parent->Lock); // Get a write-lock on this node - usched_rwlock_r_lock(&node->Lock); + usched_rwlock_w_lock(&node->Lock); // Load node before deletion to make sure the node - // is up-to-date + // is up-to-date. 
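One fix above is easy to miss: __VerifyCanDelete used to test node->Handles.element_size, which is the per-entry byte size fixed when the hashtable is constructed (compare the sizeof(...) arguments passed to hashtable_construct in handle.c) and therefore never zero, so the guard would report every node as busy. The corrected check counts the stored entries instead. A small illustrative version, using a simplified stand-in for the hashtable bookkeeping rather than the real structure:

#include <stddef.h>

// Simplified stand-in for the hashtable fields involved in the check.
struct hashtable {
    size_t element_count;   // number of entries currently stored
    size_t element_size;    // bytes per entry, set at construction, never 0
};

// Mirrors the intent of the open-handle guard in __VerifyCanDelete.
static int can_unlink(const struct hashtable* openHandles)
{
    // Wrong: element_size is always non-zero, so this would reject every unlink.
    // if (openHandles->element_size != 0) { return 0; }

    // Right: refuse only while handles are actually open against the node.
    return openHandles->element_count == 0;
}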
oserr = VFSNodeEnsureLoaded(node); if (oserr != OS_EOK) { + ERROR("__DeleteNode: failed to ensure %ms was loaded: %u", node->Name, oserr); goto error; } oserr = __VerifyCanDelete(node); if (oserr != OS_EOK) { + ERROR("__DeleteNode: deletion checks failed: %u", oserr); goto error; } // OK at this point we are now allowed to perform the deletion oserr = ops->Unlink(vfs->Interface, vfs->Data, nodePath); if (oserr != OS_EOK) { + ERROR("__DeleteNode: failed to delete the underlying node: %u", oserr); goto error; } @@ -97,9 +104,12 @@ static oserr_t __DeleteNode(struct VFSNode* node) &(struct __VFSChild) { .Key = node->Name } ); VFSNodeDestroy(node); + goto done; error: - usched_rwlock_r_unlock(&node->Lock); + usched_rwlock_w_unlock(&node->Lock); + +done: usched_rwlock_w_demote(&parent->Lock); mstr_delete(nodePath); return oserr; @@ -128,26 +138,29 @@ oserr_t VFSNodeUnlink(struct VFS* vfs, const char* cpath) mstr_delete(path); struct VFSNode* containingDirectory; - oserr_t osStatus = VFSNodeGet( - vfs, containingDirectoryPath, - 1, &containingDirectory); + oserr_t oserr = VFSNodeGet( + vfs, + containingDirectoryPath, + 1, + &containingDirectory + ); mstr_delete(containingDirectoryPath); - if (osStatus != OS_EOK) { - return osStatus; + if (oserr != OS_EOK) { + return oserr; } // Find the requested entry in the containing folder struct VFSNode* node; - osStatus = VFSNodeFind(containingDirectory, nodeName, &node); - if (osStatus != OS_EOK && osStatus != OS_ENOENT) { + oserr = VFSNodeFind(containingDirectory, nodeName, &node); + if (oserr != OS_EOK && oserr != OS_ENOENT) { goto exit; } - osStatus = __DeleteNode(node); + oserr = __DeleteNode(node); exit: VFSNodePut(containingDirectory); mstr_delete(nodeName); - return osStatus; + return oserr; } diff --git a/services/filed/vfs/utils.c b/services/filed/vfs/utils.c index 69c538f56..af2d44246 100644 --- a/services/filed/vfs/utils.c +++ b/services/filed/vfs/utils.c @@ -16,7 +16,7 @@ * */ -#define __TRACE +//#define __TRACE #include #include @@ -329,7 +329,7 @@ oserr_t VFSNodeCreateLinkChild(struct VFSNode* node, mstring_t* name, mstring_t* struct VFSOperations* ops; struct VFS* vfs; struct __VFSChild* result; - oserr_t osStatus, osStatus2; + oserr_t oserr, oserr2; mstring_t* nodePath; void* data; @@ -352,21 +352,21 @@ oserr_t VFSNodeCreateLinkChild(struct VFSNode* node, mstring_t* name, mstring_t* // make sure the first we do is verify it still does not exist result = hashtable_get(&node->Children, &(struct __VFSChild) { .Key = name }); if (result != NULL) { - osStatus = OS_EEXISTS; + oserr = OS_EEXISTS; goto cleanup; } - osStatus = ops->Open(vfs->Interface, vfs->Data, nodePath, &data); - if (osStatus != OS_EOK) { + oserr = ops->Open(vfs->Interface, vfs->Data, nodePath, &data); + if (oserr != OS_EOK) { goto cleanup; } - osStatus = ops->Link(vfs->Interface, vfs->Data, data, name, target, symbolic); - if (osStatus != OS_EOK) { + oserr = ops->Link(vfs->Interface, vfs->Data, data, name, target, symbolic); + if (oserr != OS_EOK) { goto close; } - osStatus = VFSNodeChildNew(node->FileSystem, node, &(struct VFSStat) { + oserr = VFSNodeChildNew(node->FileSystem, node, &(struct VFSStat) { .Name = mstr_clone(name), .Size = mstr_bsize(target), .Owner = 0, @@ -375,15 +375,15 @@ oserr_t VFSNodeCreateLinkChild(struct VFSNode* node, mstring_t* name, mstring_t* }, nodeOut); close: - osStatus2 = ops->Close(vfs->Interface, vfs->Data, data); - if (osStatus2 != OS_EOK) { - WARNING("__CreateInNode failed to cleanup handle with code %u", osStatus2); + oserr2 = 
ops->Close(vfs->Interface, vfs->Data, data); + if (oserr2 != OS_EOK) { + WARNING("__CreateInNode failed to cleanup handle with code %u", oserr2); } cleanup: usched_rwlock_w_demote(&node->Lock); mstr_delete(nodePath); - return osStatus; + return oserr; } struct __HandleExcCheckContext { diff --git a/services/filed/vfs/vfs.c b/services/filed/vfs/vfs.c index 0239c25e1..000006024 100644 --- a/services/filed/vfs/vfs.c +++ b/services/filed/vfs/vfs.c @@ -288,10 +288,8 @@ void VFSNodeDestroy(struct VFSNode* node) hashtable_destroy(&node->Mounts); mstr_delete(node->Name); - mstr_delete(node->Stats.Name); - if (node->Stats.LinkTarget != NULL) { - mstr_delete(node->Stats.LinkTarget); - } + // Stats.Name is a pointer to .Name + mstr_delete(node->Stats.LinkTarget); free(node); } diff --git a/services/served/server/install.c b/services/served/server/install.c index 498a47b91..5f5d80a8c 100644 --- a/services/served/server/install.c +++ b/services/served/server/install.c @@ -182,6 +182,18 @@ oserr_t InstallApplication(mstring_t* path, const char* basename) return __RegisterApplication(application); } +static void +__RemoveApplication( + _In_ mstring_t* path) +{ + TRACE("__RemoveApplication(path=%ms)", path); + char* pathu8 = mstr_u8(path); + if (unlink(pathu8)) { + ERROR("__RemoveApplication: failed to unlink path %s", pathu8); + } + free(pathu8); +} + void InstallBundledApplications(void) { struct DIR* setupDir; @@ -197,15 +209,11 @@ void InstallBundledApplications(void) while ((entry = readdir(setupDir)) != NULL) { mstring_t* path = mstr_fmt("/data/setup/%s", &entry->d_name[0]); - oserr_t oserr = InstallApplication(path, &entry->d_name[0]); + oserr_t oserr = InstallApplication(path, &entry->d_name[0]); if (oserr != OS_EOK) { - // Not a compatable file, delete it - char* pathu8 = mstr_u8(path); - if (unlink(pathu8)) { - ERROR("InstallBundledApplications failed to unlink path %s", pathu8); - } - free(pathu8); + ERROR("InstallBundledApplications: failed to install %ms", path); } + __RemoveApplication(path); mstr_delete(path); }