diff --git a/Objects/mimalloc/alloc.c b/Objects/mimalloc/alloc.c
index c133f23fc9830d..44c84cf1931717 100644
--- a/Objects/mimalloc/alloc.c
+++ b/Objects/mimalloc/alloc.c
@@ -237,7 +237,7 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
   if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&  // quick check: aligned pointer?
       (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
   {
-    // Suspicous: decoded value a in block is in the same page (or NULL) -- maybe a double free?
+    // Suspicious: decoded value a in block is in the same page (or NULL) -- maybe a double free?
     // (continue in separate function to improve code generation)
     is_double_free = mi_check_is_double_freex(page, block);
   }
diff --git a/Objects/mimalloc/arena.c b/Objects/mimalloc/arena.c
index f8883603860dce..8801d4d32253dd 100644
--- a/Objects/mimalloc/arena.c
+++ b/Objects/mimalloc/arena.c
@@ -269,7 +269,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
   return p;
 }
 
-// allocate in a speficic arena
+// allocate in a specific arena
 static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
                                       bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
diff --git a/Objects/mimalloc/bitmap.h b/Objects/mimalloc/bitmap.h
index 9ba15d5d6f09ea..d60f6dd3cb8e27 100644
--- a/Objects/mimalloc/bitmap.h
+++ b/Objects/mimalloc/bitmap.h
@@ -7,7 +7,7 @@ terms of the MIT license. A copy of the license can be found in the file
 
 /* ----------------------------------------------------------------------------
 Concurrent bitmap that can set/reset sequences of bits atomically,
-represeted as an array of fields where each field is a machine word (`size_t`)
+represented as an array of fields where each field is a machine word (`size_t`)
 
 There are two api's; the standard one cannot have sequences that cross
 between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).
@@ -72,7 +72,7 @@ bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_
 // For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
 bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);
 
-// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fullfilled
+// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fulfilled
 typedef bool (mi_cdecl *mi_bitmap_pred_fun_t)(mi_bitmap_index_t bitmap_idx, void* pred_arg);
 bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_pred_fun_t pred_fun, void* pred_arg, mi_bitmap_index_t* bitmap_idx);
 
diff --git a/Objects/mimalloc/init.c b/Objects/mimalloc/init.c
index cb0ef6642803cc..81b241063ff40f 100644
--- a/Objects/mimalloc/init.c
+++ b/Objects/mimalloc/init.c
@@ -560,7 +560,7 @@ void mi_process_init(void) mi_attr_noexcept {
   _mi_verbose_message("secure level: %d\n", MI_SECURE);
   _mi_verbose_message("mem tracking: %s\n", MI_TRACK_TOOL);
   #if MI_TSAN
-  _mi_verbose_message("thread santizer enabled\n");
+  _mi_verbose_message("thread sanitizer enabled\n");
   #endif
   mi_thread_init();
 
diff --git a/Objects/mimalloc/options.c b/Objects/mimalloc/options.c
index 345b560e3e7f4c..2a8f481d569e1e 100644
--- a/Objects/mimalloc/options.c
+++ b/Objects/mimalloc/options.c
@@ -269,7 +269,7 @@ static _Atomic(size_t) warning_count; // = 0; // when >= max_warning_count stop
 // (recursively) invoke malloc again to allocate space for the thread local
 // variables on demand. This is why we use a _mi_preloading test on such
 // platforms. However, C code generator may move the initial thread local address
-// load before the `if` and we therefore split it out in a separate funcion.
+// load before the `if` and we therefore split it out in a separate function.
 static mi_decl_thread bool recurse = false;
 
 static mi_decl_noinline bool mi_recurse_enter_prim(void) {
diff --git a/Objects/mimalloc/page.c b/Objects/mimalloc/page.c
index 25ecd6ec7d7983..ff7444cce10923 100644
--- a/Objects/mimalloc/page.c
+++ b/Objects/mimalloc/page.c
@@ -481,7 +481,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
       if (index < heap->page_retired_min) heap->page_retired_min = index;
       if (index > heap->page_retired_max) heap->page_retired_max = index;
       mi_assert_internal(mi_page_all_free(page));
-      return; // dont't free after all
+      return; // don't free after all
     }
   }
   _PyMem_mi_page_maybe_free(page, pq, false);
diff --git a/Objects/mimalloc/segment-map.c b/Objects/mimalloc/segment-map.c
index 3cd2127e56c1a7..5141f2e1b1dffa 100644
--- a/Objects/mimalloc/segment-map.c
+++ b/Objects/mimalloc/segment-map.c
@@ -84,7 +84,7 @@ static mi_segment_t* _mi_segment_of(const void* p) {
 
   // TODO: maintain max/min allocated range for efficiency for more efficient rejection of invalid pointers?
   // search downwards for the first segment in case it is an interior pointer
-  // could be slow but searches in MI_INTPTR_SIZE * MI_SEGMENT_SIZE (512MiB) steps trough
+  // could be slow but searches in MI_INTPTR_SIZE * MI_SEGMENT_SIZE (512MiB) steps through
   // valid huge objects
   // note: we could maintain a lowest index to speed up the path for invalid pointers?
   size_t lobitidx;
diff --git a/Objects/mimalloc/segment.c b/Objects/mimalloc/segment.c
index 0b4d3abc07a93c..9b092b9b734d4c 100644
--- a/Objects/mimalloc/segment.c
+++ b/Objects/mimalloc/segment.c
@@ -718,7 +718,7 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i
   // set slice back pointers for the first MI_MAX_SLICE_OFFSET entries
   size_t extra = slice_count-1;
   if (extra > MI_MAX_SLICE_OFFSET) extra = MI_MAX_SLICE_OFFSET;
-  if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than avaiable entries in the segment->slices
+  if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than available entries in the segment->slices
   mi_slice_t* slice_next = slice + 1;
   for (size_t i = 1; i <= extra; i++, slice_next++) {