diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index f0ea8d0d467f..6fbc3d8b75ff 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -42,7 +42,9 @@ pub var next_mmap_addr_hint: ?[*]align(page_size_min) u8 = null;
 ///
 /// On many systems, the actual page size can only be determined at runtime
 /// with `pageSize`.
-pub const page_size_min: usize = std.options.page_size_min orelse (page_size_min_default orelse
+pub const page_size_min: usize = std.options.page_size_min orelse (page_size_min_default orelse if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
+    @compileError("freestanding/other page_size_min must be provided with std.options.page_size_min")
+else
     @compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has unknown page_size_min; populate std.options.page_size_min"));
 
 /// comptime-known maximum page size of the target.
@@ -829,10 +831,8 @@ const page_size_min_default: ?usize = switch (builtin.os.tag) {
         .xtensa => 4 << 10,
         else => null,
     },
-    .freestanding, .other => switch (builtin.cpu.arch) {
+    .freestanding => switch (builtin.cpu.arch) {
         .wasm32, .wasm64 => 64 << 10,
-        .x86, .x86_64 => 4 << 10,
-        .aarch64, .aarch64_be => 4 << 10,
         else => null,
     },
     else => null,
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 016f3ab9da7a..81f061be1701 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -6,6 +6,7 @@ const math = std.math;
 const mem = @This();
 const testing = std.testing;
 const Endian = std.builtin.Endian;
+const native_os = builtin.target.os.tag;
 const native_endian = builtin.cpu.arch.endian();
 
 /// The standard library currently thoroughly depends on byte size
@@ -1091,19 +1092,20 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
     if (backend_supports_vectors and
         !std.debug.inValgrind() and // https://github.com/ziglang/zig/issues/17717
         !@inComptime() and
-        (@typeInfo(T) == .int or @typeInfo(T) == .float) and std.math.isPowerOfTwo(@bitSizeOf(T)))
+        (@typeInfo(T) == .int or @typeInfo(T) == .float) and std.math.isPowerOfTwo(@bitSizeOf(T)) and
+        (native_os != .freestanding and native_os != .other and native_os != .uefi))
     {
         switch (@import("builtin").cpu.arch) {
             // The below branch assumes that reading past the end of the buffer is valid, as long
             // as we don't read into a new page. This should be the case for most architectures
             // which use paged memory, however should be confirmed before adding a new arch below.
             .aarch64, .x86, .x86_64 => if (std.simd.suggestVectorLength(T)) |block_len| {
-                const page_size = std.heap.page_size_min;
+                const page_size = std.heap.pageSize();
                 const block_size = @sizeOf(T) * block_len;
                 const Block = @Vector(block_len, T);
                 const mask: Block = @splat(sentinel);
 
-                comptime assert(std.heap.page_size_min % @sizeOf(Block) == 0);
+                comptime assert(std.heap.page_size_max % @sizeOf(Block) == 0);
                 assert(page_size % @sizeOf(Block) == 0);
 
                 // First block may be unaligned
@@ -1153,7 +1155,7 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
 test "indexOfSentinel vector paths" {
     const Types = [_]type{ u8, u16, u32, u64 };
     const allocator = std.testing.allocator;
-    const page_size = std.heap.page_size_min;
+    const page_size = std.heap.pageSize();
 
     inline for (Types) |T| {
         const block_len = std.simd.suggestVectorLength(T) orelse continue;
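
Usage note for the new compile errors in heap.zig: `std.options` is populated from a `pub const std_options: std.Options` declaration in the build's root source file, so with this change a freestanding/other target must declare its page size there. A minimal sketch, assuming a hypothetical target with 4 KiB pages (the value is an assumption for illustration, not something this patch prescribes):

    const std = @import("std");

    // Root source file of a freestanding build. Supplying this field is what
    // the "populate std.options.page_size_min" compile error asks for; 4 << 10
    // is an assumed hardware page size.
    pub const std_options: std.Options = .{
        .page_size_min = 4 << 10,
    };

With this in place, `std.heap.page_size_min` resolves at comptime instead of hitting the new @compileError branch.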
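
The comment in the vector branch of indexOfSentinel explains why overreading is tolerated: as long as a load never crosses into a new page, reading past the sentinel only touches memory the string's page already maps. That holds whenever the block size divides the page size, which is what the surrounding asserts check, now against the runtime `pageSize()` rather than the comptime minimum. A small self-contained sketch of that page-crossing argument, using illustrative sizes that are assumptions, not values taken from this patch:

    const std = @import("std");
    const assert = std.debug.assert;

    test "block-aligned loads stay within one page" {
        const page_size: usize = 4 << 10; // illustrative; at runtime this is std.heap.pageSize()
        const block_size: usize = 32; // e.g. @sizeOf(@Vector(32, u8)) on a 256-bit SIMD target
        assert(page_size % block_size == 0);
        var addr: usize = 0;
        while (addr < 4 * page_size) : (addr += block_size) {
            // The first and last byte of each block-aligned load land in the
            // same page, so a read that overshoots the sentinel never touches
            // a new page.
            assert(addr / page_size == (addr + block_size - 1) / page_size);
        }
    }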