diff --git a/build.zig b/build.zig index 9a52a35275fe..15762f0ae881 100644 --- a/build.zig +++ b/build.zig @@ -257,13 +257,10 @@ pub fn build(b: *std.Build) !void { var code: u8 = undefined; const git_describe_untrimmed = b.runAllowFail(&[_][]const u8{ "git", - "-C", - b.build_root.path orelse ".", - "describe", - "--match", - "*.*.*", - "--tags", - "--abbrev=9", + "-C", b.build_root.path orelse ".", // affects the --git-dir argument + "--git-dir", ".git", // affected by the -C argument + "describe", "--match", "*.*.*", // + "--tags", "--abbrev=9", }, &code, .Ignore) catch { break :v version_string; }; diff --git a/doc/langref.html.in b/doc/langref.html.in index 5ab601ab991a..d190d195e237 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5498,8 +5498,9 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val {#header_open|@splat#}
{#syntax#}@splat(scalar: anytype) anytype{#endsyntax#}

- Produces a vector where each element is the value {#syntax#}scalar{#endsyntax#}. - The return type and thus the length of the vector is inferred. + Produces an array or vector where each element is the value + {#syntax#}scalar{#endsyntax#}. The return type and thus the length of the + array or vector is inferred.

{#code|test_splat_builtin.zig#} diff --git a/doc/langref/test_splat_builtin.zig b/doc/langref/test_splat_builtin.zig index 74bddfe3b01d..d11556b1a3f2 100644 --- a/doc/langref/test_splat_builtin.zig +++ b/doc/langref/test_splat_builtin.zig @@ -7,4 +7,10 @@ test "vector @splat" { try expect(std.mem.eql(u32, &@as([4]u32, result), &[_]u32{ 5, 5, 5, 5 })); } +test "array @splat" { + const scalar: u32 = 5; + const result: [4]u32 = @splat(scalar); + try expect(std.mem.eql(u32, &@as([4]u32, result), &[_]u32{ 5, 5, 5, 5 })); +} + // test diff --git a/lib/compiler/aro/aro/target.zig b/lib/compiler/aro/aro/target.zig index 7495eb5d9ae4..bea982daa2d3 100644 --- a/lib/compiler/aro/aro/target.zig +++ b/lib/compiler/aro/aro/target.zig @@ -204,7 +204,7 @@ pub fn unnamedFieldAffectsAlignment(target: std.Target) bool { }, .armeb => { if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) { - if (std.Target.Abi.default(target.cpu.arch, target.os) == .eabi) return true; + if (std.Target.Abi.default(target.cpu.arch, target.os.tag) == .eabi) return true; } }, .arm => return true, @@ -716,7 +716,7 @@ test "alignment functions - smoke test" { const x86 = std.Target.Cpu.Arch.x86_64; target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86, .none); target.cpu = std.Target.Cpu.baseline(x86, target.os); - target.abi = std.Target.Abi.default(x86, target.os); + target.abi = std.Target.Abi.default(x86, target.os.tag); try std.testing.expect(isTlsSupported(target)); try std.testing.expect(!ignoreNonZeroSizedBitfieldTypeAlignment(target)); @@ -729,7 +729,7 @@ test "alignment functions - smoke test" { const arm = std.Target.Cpu.Arch.arm; target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm, .none); target.cpu = std.Target.Cpu.baseline(arm, target.os); - target.abi = std.Target.Abi.default(arm, target.os); + target.abi = std.Target.Abi.default(arm, target.os.tag); try std.testing.expect(!isTlsSupported(target)); try 
std.testing.expect(ignoreNonZeroSizedBitfieldTypeAlignment(target)); diff --git a/lib/compiler_rt/int_from_float.zig b/lib/compiler_rt/int_from_float.zig index 7bbcd90893de..0c2c73bb427f 100644 --- a/lib/compiler_rt/int_from_float.zig +++ b/lib/compiler_rt/int_from_float.zig @@ -72,10 +72,12 @@ pub inline fn bigIntFromFloat(comptime signedness: std.builtin.Signedness, resul } }); const parts = math.frexp(a); - const exponent = @max(parts.exponent - significand_bits, 0); + const significand_bits_adjusted_to_handle_smin = @as(i32, significand_bits) + + @intFromBool(signedness == .signed and parts.exponent == 32 * result.len); + const exponent = @max(parts.exponent - significand_bits_adjusted_to_handle_smin, 0); const int: I = @intFromFloat(switch (exponent) { 0 => a, - else => math.ldexp(parts.significand, significand_bits), + else => math.ldexp(parts.significand, significand_bits_adjusted_to_handle_smin), }); switch (signedness) { .signed => { diff --git a/lib/compiler_rt/int_from_float_test.zig b/lib/compiler_rt/int_from_float_test.zig index e10ed1ec00e2..5305ecf2a065 100644 --- a/lib/compiler_rt/int_from_float_test.zig +++ b/lib/compiler_rt/int_from_float_test.zig @@ -24,6 +24,8 @@ const __fixdfdi = @import("fixdfdi.zig").__fixdfdi; const __fixunsdfdi = @import("fixunsdfdi.zig").__fixunsdfdi; const __fixdfti = @import("fixdfti.zig").__fixdfti; const __fixunsdfti = @import("fixunsdfti.zig").__fixunsdfti; +const __fixdfei = @import("fixdfei.zig").__fixdfei; +const __fixunsdfei = @import("fixunsdfei.zig").__fixunsdfei; // Conversion from f128 const __fixtfsi = @import("fixtfsi.zig").__fixtfsi; @@ -681,6 +683,44 @@ test "fixunsdfti" { try test__fixunsdfti(-0x1.FFFFFFFFFFFFEp+62, 0); } +fn test_fixdfei(comptime T: type, expected: T, a: f64) !void { + const int = @typeInfo(T).int; + var expected_buf: [@divExact(int.bits, 32)]u32 = undefined; + std.mem.writeInt(T, std.mem.asBytes(&expected_buf), expected, endian); + var actual_buf: [@divExact(int.bits, 32)]u32 = 
undefined; + _ = switch (int.signedness) { + .signed => __fixdfei, + .unsigned => __fixunsdfei, + }(&actual_buf, int.bits, a); + try testing.expect(std.mem.eql(u32, &expected_buf, &actual_buf)); +} + +test "fixdfei" { + try test_fixdfei(i256, -1 << 255, -0x1p255); + try test_fixdfei(i256, -1 << 127, -0x1p127); + try test_fixdfei(i256, -1 << 100, -0x1p100); + try test_fixdfei(i256, -1 << 50, -0x1p50); + try test_fixdfei(i256, -1 << 1, -0x1p1); + try test_fixdfei(i256, -1 << 0, -0x1p0); + try test_fixdfei(i256, 0, 0); + try test_fixdfei(i256, 1 << 0, 0x1p0); + try test_fixdfei(i256, 1 << 1, 0x1p1); + try test_fixdfei(i256, 1 << 50, 0x1p50); + try test_fixdfei(i256, 1 << 100, 0x1p100); + try test_fixdfei(i256, 1 << 127, 0x1p127); + try test_fixdfei(i256, 1 << 254, 0x1p254); +} + +test "fixundfei" { + try test_fixdfei(u256, 0, 0); + try test_fixdfei(u256, 1 << 0, 0x1p0); + try test_fixdfei(u256, 1 << 1, 0x1p1); + try test_fixdfei(u256, 1 << 50, 0x1p50); + try test_fixdfei(u256, 1 << 100, 0x1p100); + try test_fixdfei(u256, 1 << 127, 0x1p127); + try test_fixdfei(u256, 1 << 255, 0x1p255); +} + fn test__fixtfsi(a: f128, expected: i32) !void { const x = __fixtfsi(a); try testing.expect(x == expected); diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 24edee37a838..932ec005177c 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -1254,11 +1254,6 @@ fn testGetCurrentFileTimestamp(dir: fs.Dir) !i128 { } test "cache file and then recall it" { - if (builtin.os.tag == .wasi) { - // https://github.com/ziglang/zig/issues/5437 - return error.SkipZigTest; - } - var tmp = testing.tmpDir(.{}); defer tmp.cleanup(); @@ -1320,11 +1315,6 @@ test "cache file and then recall it" { } test "check that changing a file makes cache fail" { - if (builtin.os.tag == .wasi) { - // https://github.com/ziglang/zig/issues/5437 - return error.SkipZigTest; - } - var tmp = testing.tmpDir(.{}); defer tmp.cleanup(); @@ -1394,11 +1384,6 @@ test "check that changing a file 
makes cache fail" { } test "no file inputs" { - if (builtin.os.tag == .wasi) { - // https://github.com/ziglang/zig/issues/5437 - return error.SkipZigTest; - } - var tmp = testing.tmpDir(.{}); defer tmp.cleanup(); @@ -1442,11 +1427,6 @@ test "no file inputs" { } test "Manifest with files added after initial hash work" { - if (builtin.os.tag == .wasi) { - // https://github.com/ziglang/zig/issues/5437 - return error.SkipZigTest; - } - var tmp = testing.tmpDir(.{}); defer tmp.cleanup(); diff --git a/lib/std/Build/Step/InstallArtifact.zig b/lib/std/Build/Step/InstallArtifact.zig index 3d404eb8ca92..acf392f49f4e 100644 --- a/lib/std/Build/Step/InstallArtifact.zig +++ b/lib/std/Build/Step/InstallArtifact.zig @@ -189,9 +189,9 @@ fn make(step: *Step, options: Step.MakeOptions) !void { const src_dir_path = dir.source.getPath3(b, step); const full_h_prefix = b.getInstallPath(h_dir, dir.dest_rel_path); - var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.sub_path, .{ .iterate = true }) catch |err| { - return step.fail("unable to open source directory '{s}': {s}", .{ - src_dir_path.sub_path, @errorName(err), + var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| { + return step.fail("unable to open source directory '{}': {s}", .{ + src_dir_path, @errorName(err), }); }; defer src_dir.close(); diff --git a/lib/std/Target.zig b/lib/std/Target.zig index 8e78e4dc29ed..79a78a41dfd1 100644 --- a/lib/std/Target.zig +++ b/lib/std/Target.zig @@ -803,8 +803,8 @@ pub const Abi = enum { // - raygeneration // - vertex - pub fn default(arch: Cpu.Arch, os: Os) Abi { - return switch (os.tag) { + pub fn default(arch: Cpu.Arch, os_tag: Os.Tag) Abi { + return switch (os_tag) { .freestanding, .other => switch (arch) { // Soft float is usually a sane default for freestanding. 
.arm, diff --git a/lib/std/Target/Query.zig b/lib/std/Target/Query.zig index 2d5c73410871..cf53a8175b72 100644 --- a/lib/std/Target/Query.zig +++ b/lib/std/Target/Query.zig @@ -102,7 +102,7 @@ pub fn fromTarget(target: Target) Query { .os_version_min = undefined, .os_version_max = undefined, .abi = target.abi, - .glibc_version = target.os.versionRange().gnuLibCVersion(), + .glibc_version = if (target.abi.isGnu()) target.os.versionRange().gnuLibCVersion() else null, .android_api_level = if (target.abi.isAndroid()) target.os.version_range.linux.android else null, }; result.updateOsVersionRange(target.os); diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index 7849e98b4282..bedba88a7328 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -50,19 +50,19 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { } /// Deinitialize with `deinit` or use `toOwnedSlice`. - pub fn init(allocator: Allocator) Self { + pub fn init(gpa: Allocator) Self { return Self{ .items = &[_]T{}, .capacity = 0, - .allocator = allocator, + .allocator = gpa, }; } /// Initialize with capacity to hold `num` elements. /// The resulting capacity will equal `num` exactly. /// Deinitialize with `deinit` or use `toOwnedSlice`. - pub fn initCapacity(allocator: Allocator, num: usize) Allocator.Error!Self { - var self = Self.init(allocator); + pub fn initCapacity(gpa: Allocator, num: usize) Allocator.Error!Self { + var self = Self.init(gpa); try self.ensureTotalCapacityPrecise(num); return self; } @@ -75,24 +75,24 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { } /// ArrayList takes ownership of the passed in slice. The slice must have been - /// allocated with `allocator`. + /// allocated with `gpa`. /// Deinitialize with `deinit` or use `toOwnedSlice`. 
- pub fn fromOwnedSlice(allocator: Allocator, slice: Slice) Self { + pub fn fromOwnedSlice(gpa: Allocator, slice: Slice) Self { return Self{ .items = slice, .capacity = slice.len, - .allocator = allocator, + .allocator = gpa, }; } /// ArrayList takes ownership of the passed in slice. The slice must have been - /// allocated with `allocator`. + /// allocated with `gpa`. /// Deinitialize with `deinit` or use `toOwnedSlice`. - pub fn fromOwnedSliceSentinel(allocator: Allocator, comptime sentinel: T, slice: [:sentinel]T) Self { + pub fn fromOwnedSliceSentinel(gpa: Allocator, comptime sentinel: T, slice: [:sentinel]T) Self { return Self{ .items = slice, .capacity = slice.len + 1, - .allocator = allocator, + .allocator = gpa, }; } @@ -646,9 +646,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Initialize with capacity to hold `num` elements. /// The resulting capacity will equal `num` exactly. /// Deinitialize with `deinit` or use `toOwnedSlice`. - pub fn initCapacity(allocator: Allocator, num: usize) Allocator.Error!Self { + pub fn initCapacity(gpa: Allocator, num: usize) Allocator.Error!Self { var self = Self{}; - try self.ensureTotalCapacityPrecise(allocator, num); + try self.ensureTotalCapacityPrecise(gpa, num); return self; } @@ -664,19 +664,18 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ } /// Release all allocated memory. - pub fn deinit(self: *Self, allocator: Allocator) void { - allocator.free(self.allocatedSlice()); + pub fn deinit(self: *Self, gpa: Allocator) void { + gpa.free(self.allocatedSlice()); self.* = undefined; } /// Convert this list into an analogous memory-managed one. /// The returned list has ownership of the underlying memory. 
- pub fn toManaged(self: *Self, allocator: Allocator) ArrayListAligned(T, alignment) { - return .{ .items = self.items, .capacity = self.capacity, .allocator = allocator }; + pub fn toManaged(self: *Self, gpa: Allocator) ArrayListAligned(T, alignment) { + return .{ .items = self.items, .capacity = self.capacity, .allocator = gpa }; } - /// ArrayListUnmanaged takes ownership of the passed in slice. The slice must have been - /// allocated with `allocator`. + /// ArrayListUnmanaged takes ownership of the passed in slice. /// Deinitialize with `deinit` or use `toOwnedSlice`. pub fn fromOwnedSlice(slice: Slice) Self { return Self{ @@ -685,8 +684,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ }; } - /// ArrayListUnmanaged takes ownership of the passed in slice. The slice must have been - /// allocated with `allocator`. + /// ArrayListUnmanaged takes ownership of the passed in slice. /// Deinitialize with `deinit` or use `toOwnedSlice`. pub fn fromOwnedSliceSentinel(comptime sentinel: T, slice: [:sentinel]T) Self { return Self{ @@ -697,31 +695,31 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// The caller owns the returned memory. Empties this ArrayList. /// Its capacity is cleared, making deinit() safe but unnecessary to call. - pub fn toOwnedSlice(self: *Self, allocator: Allocator) Allocator.Error!Slice { + pub fn toOwnedSlice(self: *Self, gpa: Allocator) Allocator.Error!Slice { const old_memory = self.allocatedSlice(); - if (allocator.remap(old_memory, self.items.len)) |new_items| { + if (gpa.remap(old_memory, self.items.len)) |new_items| { self.* = .empty; return new_items; } - const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len); + const new_memory = try gpa.alignedAlloc(T, alignment, self.items.len); @memcpy(new_memory, self.items); - self.clearAndFree(allocator); + self.clearAndFree(gpa); return new_memory; } /// The caller owns the returned memory. 
ArrayList becomes empty. - pub fn toOwnedSliceSentinel(self: *Self, allocator: Allocator, comptime sentinel: T) Allocator.Error!SentinelSlice(sentinel) { + pub fn toOwnedSliceSentinel(self: *Self, gpa: Allocator, comptime sentinel: T) Allocator.Error!SentinelSlice(sentinel) { // This addition can never overflow because `self.items` can never occupy the whole address space - try self.ensureTotalCapacityPrecise(allocator, self.items.len + 1); + try self.ensureTotalCapacityPrecise(gpa, self.items.len + 1); self.appendAssumeCapacity(sentinel); - const result = try self.toOwnedSlice(allocator); + const result = try self.toOwnedSlice(gpa); return result[0 .. result.len - 1 :sentinel]; } /// Creates a copy of this ArrayList. - pub fn clone(self: Self, allocator: Allocator) Allocator.Error!Self { - var cloned = try Self.initCapacity(allocator, self.capacity); + pub fn clone(self: Self, gpa: Allocator) Allocator.Error!Self { + var cloned = try Self.initCapacity(gpa, self.capacity); cloned.appendSliceAssumeCapacity(self.items); return cloned; } @@ -731,8 +729,8 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// This operation is O(N). /// Invalidates element pointers if additional memory is needed. /// Asserts that the index is in bounds or equal to the length. - pub fn insert(self: *Self, allocator: Allocator, i: usize, item: T) Allocator.Error!void { - const dst = try self.addManyAt(allocator, i, 1); + pub fn insert(self: *Self, gpa: Allocator, i: usize, item: T) Allocator.Error!void { + const dst = try self.addManyAt(gpa, i, 1); dst[0] = item; } @@ -759,11 +757,11 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Asserts that the index is in bounds or equal to the length. 
pub fn addManyAt( self: *Self, - allocator: Allocator, + gpa: Allocator, index: usize, count: usize, ) Allocator.Error![]T { - var managed = self.toManaged(allocator); + var managed = self.toManaged(gpa); defer self.* = managed.moveToUnmanaged(); return managed.addManyAt(index, count); } @@ -795,12 +793,12 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Asserts that the index is in bounds or equal to the length. pub fn insertSlice( self: *Self, - allocator: Allocator, + gpa: Allocator, index: usize, items: []const T, ) Allocator.Error!void { const dst = try self.addManyAt( - allocator, + gpa, index, items.len, ); @@ -812,7 +810,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Asserts that the range is in bounds. pub fn replaceRange( self: *Self, - allocator: Allocator, + gpa: Allocator, start: usize, len: usize, new_items: []const T, @@ -823,7 +821,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ const first = new_items[0..range.len]; const rest = new_items[range.len..]; @memcpy(range[0..first.len], first); - try self.insertSlice(allocator, after_range, rest); + try self.insertSlice(gpa, after_range, rest); } else { self.replaceRangeAssumeCapacity(start, len, new_items); } @@ -859,8 +857,8 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Extend the list by 1 element. Allocates more memory as necessary. /// Invalidates element pointers if additional memory is needed. - pub fn append(self: *Self, allocator: Allocator, item: T) Allocator.Error!void { - const new_item_ptr = try self.addOne(allocator); + pub fn append(self: *Self, gpa: Allocator, item: T) Allocator.Error!void { + const new_item_ptr = try self.addOne(gpa); new_item_ptr.* = item; } @@ -899,8 +897,8 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Append the slice of items to the list. 
Allocates more /// memory as necessary. /// Invalidates element pointers if additional memory is needed. - pub fn appendSlice(self: *Self, allocator: Allocator, items: []const T) Allocator.Error!void { - try self.ensureUnusedCapacity(allocator, items.len); + pub fn appendSlice(self: *Self, gpa: Allocator, items: []const T) Allocator.Error!void { + try self.ensureUnusedCapacity(gpa, items.len); self.appendSliceAssumeCapacity(items); } @@ -918,8 +916,8 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// memory as necessary. Only call this function if a call to `appendSlice` instead would /// be a compile error. /// Invalidates element pointers if additional memory is needed. - pub fn appendUnalignedSlice(self: *Self, allocator: Allocator, items: []align(1) const T) Allocator.Error!void { - try self.ensureUnusedCapacity(allocator, items.len); + pub fn appendUnalignedSlice(self: *Self, gpa: Allocator, items: []align(1) const T) Allocator.Error!void { + try self.ensureUnusedCapacity(gpa, items.len); self.appendUnalignedSliceAssumeCapacity(items); } @@ -947,8 +945,8 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ std.io.Writer(WriterContext, Allocator.Error, appendWrite); /// Initializes a Writer which will append to the list. - pub fn writer(self: *Self, allocator: Allocator) Writer { - return .{ .context = .{ .self = self, .allocator = allocator } }; + pub fn writer(self: *Self, gpa: Allocator) Writer { + return .{ .context = .{ .self = self, .allocator = gpa } }; } /// Same as `append` except it returns the number of bytes written, @@ -983,9 +981,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Invalidates element pointers if additional memory is needed. /// The function is inline so that a comptime-known `value` parameter will /// have a more optimal memset codegen in case it has a repeated byte pattern. 
- pub inline fn appendNTimes(self: *Self, allocator: Allocator, value: T, n: usize) Allocator.Error!void { + pub inline fn appendNTimes(self: *Self, gpa: Allocator, value: T, n: usize) Allocator.Error!void { const old_len = self.items.len; - try self.resize(allocator, try addOrOom(old_len, n)); + try self.resize(gpa, try addOrOom(old_len, n)); @memset(self.items[old_len..self.items.len], value); } @@ -1004,15 +1002,15 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Adjust the list length to `new_len`. /// Additional elements contain the value `undefined`. /// Invalidates element pointers if additional memory is needed. - pub fn resize(self: *Self, allocator: Allocator, new_len: usize) Allocator.Error!void { - try self.ensureTotalCapacity(allocator, new_len); + pub fn resize(self: *Self, gpa: Allocator, new_len: usize) Allocator.Error!void { + try self.ensureTotalCapacity(gpa, new_len); self.items.len = new_len; } /// Reduce allocated capacity to `new_len`. /// May invalidate element pointers. /// Asserts that the new length is less than or equal to the previous length. - pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void { + pub fn shrinkAndFree(self: *Self, gpa: Allocator, new_len: usize) void { assert(new_len <= self.items.len); if (@sizeOf(T) == 0) { @@ -1021,13 +1019,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ } const old_memory = self.allocatedSlice(); - if (allocator.remap(old_memory, new_len)) |new_items| { + if (gpa.remap(old_memory, new_len)) |new_items| { self.capacity = new_items.len; self.items = new_items; return; } - const new_memory = allocator.alignedAlloc(T, alignment, new_len) catch |e| switch (e) { + const new_memory = gpa.alignedAlloc(T, alignment, new_len) catch |e| switch (e) { error.OutOfMemory => { // No problem, capacity is still correct then. 
self.items.len = new_len; @@ -1036,7 +1034,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ }; @memcpy(new_memory, self.items[0..new_len]); - allocator.free(old_memory); + gpa.free(old_memory); self.items = new_memory; self.capacity = new_memory.len; } @@ -1056,8 +1054,8 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ } /// Invalidates all element pointers. - pub fn clearAndFree(self: *Self, allocator: Allocator) void { - allocator.free(self.allocatedSlice()); + pub fn clearAndFree(self: *Self, gpa: Allocator) void { + gpa.free(self.allocatedSlice()); self.items.len = 0; self.capacity = 0; } @@ -1073,7 +1071,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// If the current capacity is less than `new_capacity`, this function will /// modify the array so that it can hold exactly `new_capacity` items. /// Invalidates element pointers if additional memory is needed. - pub fn ensureTotalCapacityPrecise(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void { + pub fn ensureTotalCapacityPrecise(self: *Self, gpa: Allocator, new_capacity: usize) Allocator.Error!void { if (@sizeOf(T) == 0) { self.capacity = math.maxInt(usize); return; @@ -1087,13 +1085,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ // the allocator implementation would pointlessly copy our // extra capacity. 
const old_memory = self.allocatedSlice(); - if (allocator.remap(old_memory, new_capacity)) |new_memory| { + if (gpa.remap(old_memory, new_capacity)) |new_memory| { self.items.ptr = new_memory.ptr; self.capacity = new_memory.len; } else { - const new_memory = try allocator.alignedAlloc(T, alignment, new_capacity); + const new_memory = try gpa.alignedAlloc(T, alignment, new_capacity); @memcpy(new_memory[0..self.items.len], self.items); - allocator.free(old_memory); + gpa.free(old_memory); self.items.ptr = new_memory.ptr; self.capacity = new_memory.len; } @@ -1103,10 +1101,10 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Invalidates element pointers if additional memory is needed. pub fn ensureUnusedCapacity( self: *Self, - allocator: Allocator, + gpa: Allocator, additional_count: usize, ) Allocator.Error!void { - return self.ensureTotalCapacity(allocator, try addOrOom(self.items.len, additional_count)); + return self.ensureTotalCapacity(gpa, try addOrOom(self.items.len, additional_count)); } /// Increases the array's length to match the full capacity that is already allocated. @@ -1118,10 +1116,10 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Increase length by 1, returning pointer to the new item. /// The returned element pointer becomes invalid when the list is resized. - pub fn addOne(self: *Self, allocator: Allocator) Allocator.Error!*T { + pub fn addOne(self: *Self, gpa: Allocator) Allocator.Error!*T { // This can never overflow because `self.items` can never occupy the whole address space const newlen = self.items.len + 1; - try self.ensureTotalCapacity(allocator, newlen); + try self.ensureTotalCapacity(gpa, newlen); return self.addOneAssumeCapacity(); } @@ -1139,9 +1137,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Resize the array, adding `n` new elements, which have `undefined` values. 
/// The return value is an array pointing to the newly allocated elements. /// The returned pointer becomes invalid when the list is resized. - pub fn addManyAsArray(self: *Self, allocator: Allocator, comptime n: usize) Allocator.Error!*[n]T { + pub fn addManyAsArray(self: *Self, gpa: Allocator, comptime n: usize) Allocator.Error!*[n]T { const prev_len = self.items.len; - try self.resize(allocator, try addOrOom(self.items.len, n)); + try self.resize(gpa, try addOrOom(self.items.len, n)); return self.items[prev_len..][0..n]; } @@ -1161,9 +1159,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// The return value is a slice pointing to the newly allocated elements. /// The returned pointer becomes invalid when the list is resized. /// Resizes list if `self.capacity` is not large enough. - pub fn addManyAsSlice(self: *Self, allocator: Allocator, n: usize) Allocator.Error![]T { + pub fn addManyAsSlice(self: *Self, gpa: Allocator, n: usize) Allocator.Error![]T { const prev_len = self.items.len; - try self.resize(allocator, try addOrOom(self.items.len, n)); + try self.resize(gpa, try addOrOom(self.items.len, n)); return self.items[prev_len..][0..n]; } diff --git a/lib/std/crypto/Certificate.zig b/lib/std/crypto/Certificate.zig index 9ca6aa5e48ef..c89141ec7d04 100644 --- a/lib/std/crypto/Certificate.zig +++ b/lib/std/crypto/Certificate.zig @@ -607,11 +607,10 @@ const Date = struct { } { - const is_leap = std.time.epoch.isLeapYear(date.year); var month: u4 = 1; while (month < date.month) : (month += 1) { const days: u64 = std.time.epoch.getDaysInMonth( - @as(std.time.epoch.YearLeapKind, @enumFromInt(@intFromBool(is_leap))), + date.year, @as(std.time.epoch.Month, @enumFromInt(month)), ); sec += days * std.time.epoch.secs_per_day; diff --git a/lib/std/enums.zig b/lib/std/enums.zig index cb928ac02301..ad495b252b70 100644 --- a/lib/std/enums.zig +++ b/lib/std/enums.zig @@ -55,7 +55,7 @@ pub fn values(comptime E: type) []const E { /// A safe 
alternative to @tagName() for non-exhaustive enums that doesn't /// panic when `e` has no tagged value. /// Returns the tag name for `e` or null if no tag exists. -pub fn tagName(comptime E: type, e: E) ?[]const u8 { +pub fn tagName(comptime E: type, e: E) ?[:0]const u8 { return inline for (@typeInfo(E).@"enum".fields) |f| { if (@intFromEnum(e) == f.value) break f.name; } else null; diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 98fef1b2ce14..546435f4ab3c 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -320,14 +320,8 @@ test "accessAbsolute" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var arena = ArenaAllocator.init(testing.allocator); - defer arena.deinit(); - const allocator = arena.allocator(); - - const base_path = blk: { - const relative_path = try fs.path.join(allocator, &.{ ".zig-cache", "tmp", tmp.sub_path[0..] }); - break :blk try fs.realpathAlloc(allocator, relative_path); - }; + const base_path = try tmp.dir.realpathAlloc(testing.allocator, "."); + defer testing.allocator.free(base_path); try fs.accessAbsolute(base_path, .{}); } @@ -338,25 +332,52 @@ test "openDirAbsolute" { var tmp = tmpDir(.{}); defer tmp.cleanup(); + const tmp_ino = (try tmp.dir.stat()).inode; + try tmp.dir.makeDir("subdir"); - var arena = ArenaAllocator.init(testing.allocator); - defer arena.deinit(); - const allocator = arena.allocator(); + const sub_path = try tmp.dir.realpathAlloc(testing.allocator, "subdir"); + defer testing.allocator.free(sub_path); - const base_path = blk: { - const relative_path = try fs.path.join(allocator, &.{ ".zig-cache", "tmp", tmp.sub_path[0..], "subdir" }); - break :blk try fs.realpathAlloc(allocator, relative_path); - }; + // Can open sub_path + var tmp_sub = try fs.openDirAbsolute(sub_path, .{}); + defer tmp_sub.close(); + + const sub_ino = (try tmp_sub.stat()).inode; { - var dir = try fs.openDirAbsolute(base_path, .{}); + // Can open sub_path + ".." 
+ const dir_path = try fs.path.join(testing.allocator, &.{ sub_path, ".." }); + defer testing.allocator.free(dir_path); + + var dir = try fs.openDirAbsolute(dir_path, .{}); defer dir.close(); + + const ino = (try dir.stat()).inode; + try testing.expectEqual(tmp_ino, ino); } - for ([_][]const u8{ ".", ".." }) |sub_path| { - const dir_path = try fs.path.join(allocator, &.{ base_path, sub_path }); + { + // Can open sub_path + "." + const dir_path = try fs.path.join(testing.allocator, &.{ sub_path, "." }); + defer testing.allocator.free(dir_path); + var dir = try fs.openDirAbsolute(dir_path, .{}); defer dir.close(); + + const ino = (try dir.stat()).inode; + try testing.expectEqual(sub_ino, ino); + } + + { + // Can open subdir + "..", with some extra "." + const dir_path = try fs.path.join(testing.allocator, &.{ sub_path, ".", "..", "." }); + defer testing.allocator.free(dir_path); + + var dir = try fs.openDirAbsolute(dir_path, .{}); + defer dir.close(); + + const ino = (try dir.stat()).inode; + try testing.expectEqual(tmp_ino, ino); } } @@ -409,10 +430,7 @@ test "readLinkAbsolute" { defer arena.deinit(); const allocator = arena.allocator(); - const base_path = blk: { - const relative_path = try fs.path.join(allocator, &.{ ".zig-cache", "tmp", tmp.sub_path[0..] }); - break :blk try fs.realpathAlloc(allocator, relative_path); - }; + const base_path = try tmp.dir.realpathAlloc(allocator, "."); { const target_path = try fs.path.join(allocator, &.{ base_path, "file.txt" }); @@ -748,7 +766,6 @@ test "directory operations on files" { test "file operations on directories" { // TODO: fix this test on FreeBSD. 
https://github.com/ziglang/zig/issues/1759 if (native_os == .freebsd) return error.SkipZigTest; - if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/20747 try testWithAllSupportedPathTypes(struct { fn impl(ctx: *TestContext) !void { @@ -759,18 +776,30 @@ test "file operations on directories" { try testing.expectError(error.IsDir, ctx.dir.createFile(test_dir_name, .{})); try testing.expectError(error.IsDir, ctx.dir.deleteFile(test_dir_name)); switch (native_os) { - // no error when reading a directory. - .dragonfly, .netbsd => {}, - // Currently, WASI will return error.Unexpected (via ENOTCAPABLE) when attempting fd_read on a directory handle. - // TODO: Re-enable on WASI once https://github.com/bytecodealliance/wasmtime/issues/1935 is resolved. - .wasi => {}, + .dragonfly, .netbsd => { + // no error when reading a directory. See https://github.com/ziglang/zig/issues/5732 + const buf = try ctx.dir.readFileAlloc(testing.allocator, test_dir_name, std.math.maxInt(usize)); + testing.allocator.free(buf); + }, + .wasi => { + // WASI return EBADF, which gets mapped to NotOpenForReading. + // See https://github.com/bytecodealliance/wasmtime/issues/1935 + try testing.expectError(error.NotOpenForReading, ctx.dir.readFileAlloc(testing.allocator, test_dir_name, std.math.maxInt(usize))); + }, else => { try testing.expectError(error.IsDir, ctx.dir.readFileAlloc(testing.allocator, test_dir_name, std.math.maxInt(usize))); }, } - // Note: The `.mode = .read_write` is necessary to ensure the error occurs on all platforms. 
- // TODO: Add a read-only test as well, see https://github.com/ziglang/zig/issues/5732 - try testing.expectError(error.IsDir, ctx.dir.openFile(test_dir_name, .{ .mode = .read_write })); + + if (native_os == .wasi and builtin.link_libc) { + // wasmtime unexpectedly succeeds here, see https://github.com/ziglang/zig/issues/20747 + const handle = try ctx.dir.openFile(test_dir_name, .{ .mode = .read_write }); + handle.close(); + } else { + // Note: The `.mode = .read_write` is necessary to ensure the error occurs on all platforms. + // TODO: Add a read-only test as well, see https://github.com/ziglang/zig/issues/5732 + try testing.expectError(error.IsDir, ctx.dir.openFile(test_dir_name, .{ .mode = .read_write })); + } if (ctx.path_type == .absolute and comptime PathType.absolute.isSupported(builtin.os)) { try testing.expectError(error.IsDir, fs.createFileAbsolute(test_dir_name, .{})); @@ -993,10 +1022,7 @@ test "renameAbsolute" { defer arena.deinit(); const allocator = arena.allocator(); - const base_path = blk: { - const relative_path = try fs.path.join(allocator, &.{ ".zig-cache", "tmp", tmp_dir.sub_path[0..] }); - break :blk try fs.realpathAlloc(allocator, relative_path); - }; + const base_path = try tmp_dir.dir.realpathAlloc(allocator, "."); try testing.expectError(error.FileNotFound, fs.renameAbsolute( try fs.path.join(allocator, &.{ base_path, "missing_file_name" }), @@ -1386,7 +1412,6 @@ test "sendfile" { defer tmp.cleanup(); try tmp.dir.makePath("os_test_tmp"); - defer tmp.dir.deleteTree("os_test_tmp") catch {}; var dir = try tmp.dir.openDir("os_test_tmp", .{}); defer dir.close(); @@ -1451,7 +1476,6 @@ test "copyRangeAll" { defer tmp.cleanup(); try tmp.dir.makePath("os_test_tmp"); - defer tmp.dir.deleteTree("os_test_tmp") catch {}; var dir = try tmp.dir.openDir("os_test_tmp", .{}); defer dir.close(); @@ -1800,10 +1824,7 @@ test "'.' and '..' 
in absolute functions" { defer arena.deinit(); const allocator = arena.allocator(); - const base_path = blk: { - const relative_path = try fs.path.join(allocator, &.{ ".zig-cache", "tmp", tmp.sub_path[0..] }); - break :blk try fs.realpathAlloc(allocator, relative_path); - }; + const base_path = try tmp.dir.realpathAlloc(allocator, "."); const subdir_path = try fs.path.join(allocator, &.{ base_path, "./subdir" }); try fs.makeDirAbsolute(subdir_path); diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig index a1eae13efcd8..106460387abc 100644 --- a/lib/std/heap/PageAllocator.zig +++ b/lib/std/heap/PageAllocator.zig @@ -6,9 +6,14 @@ const maxInt = std.math.maxInt; const assert = std.debug.assert; const native_os = builtin.os.tag; const windows = std.os.windows; +const ntdll = windows.ntdll; const posix = std.posix; const page_size_min = std.heap.page_size_min; +const SUCCESS = @import("../os/windows/ntstatus.zig").NTSTATUS.SUCCESS; +const MEM_RESERVE_PLACEHOLDER = windows.MEM_RESERVE_PLACEHOLDER; +const MEM_PRESERVE_PLACEHOLDER = windows.MEM_PRESERVE_PLACEHOLDER; + pub const vtable: Allocator.VTable = .{ .alloc = alloc, .resize = resize, @@ -22,51 +27,62 @@ pub fn map(n: usize, alignment: mem.Alignment) ?[*]u8 { const alignment_bytes = alignment.toByteUnits(); if (native_os == .windows) { - // According to official documentation, VirtualAlloc aligns to page - // boundary, however, empirically it reserves pages on a 64K boundary. - // Since it is very likely the requested alignment will be honored, - // this logic first tries a call with exactly the size requested, - // before falling back to the loop below. - // https://devblogs.microsoft.com/oldnewthing/?p=42223 - const addr = windows.VirtualAlloc( - null, - // VirtualAlloc will round the length to a multiple of page size. - // "If the lpAddress parameter is NULL, this value is rounded up to - // the next page boundary". 
- n, - windows.MEM_COMMIT | windows.MEM_RESERVE, - windows.PAGE_READWRITE, - ) catch return null; - - if (mem.isAligned(@intFromPtr(addr), alignment_bytes)) - return @ptrCast(addr); - - // Fallback: reserve a range of memory large enough to find a - // sufficiently aligned address, then free the entire range and - // immediately allocate the desired subset. Another thread may have won - // the race to map the target range, in which case a retry is needed. - windows.VirtualFree(addr, 0, windows.MEM_RELEASE); + var base_addr: ?*anyopaque = null; + var size: windows.SIZE_T = n; + + var status = ntdll.NtAllocateVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), 0, &size, windows.MEM_COMMIT | windows.MEM_RESERVE, windows.PAGE_READWRITE); + + if (status == SUCCESS and mem.isAligned(@intFromPtr(base_addr), alignment_bytes)) { + return @ptrCast(base_addr); + } + + if (status == SUCCESS) { + var region_size: windows.SIZE_T = 0; + _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), &region_size, windows.MEM_RELEASE); + } const overalloc_len = n + alignment_bytes - page_size; const aligned_len = mem.alignForward(usize, n, page_size); - while (true) { - const reserved_addr = windows.VirtualAlloc( - null, - overalloc_len, - windows.MEM_RESERVE, - windows.PAGE_NOACCESS, - ) catch return null; - const aligned_addr = mem.alignForward(usize, @intFromPtr(reserved_addr), alignment_bytes); - windows.VirtualFree(reserved_addr, 0, windows.MEM_RELEASE); - const ptr = windows.VirtualAlloc( - @ptrFromInt(aligned_addr), - aligned_len, - windows.MEM_COMMIT | windows.MEM_RESERVE, - windows.PAGE_READWRITE, - ) catch continue; - return @ptrCast(ptr); + base_addr = null; + size = overalloc_len; + + status = ntdll.NtAllocateVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), 0, &size, windows.MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, windows.PAGE_NOACCESS); + + if (status != SUCCESS) return null; + + const placeholder_addr = 
@intFromPtr(base_addr); + const aligned_addr = mem.alignForward(usize, placeholder_addr, alignment_bytes); + const prefix_size = aligned_addr - placeholder_addr; + + if (prefix_size > 0) { + var prefix_base = base_addr; + var prefix_size_param: windows.SIZE_T = prefix_size; + _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&prefix_base), &prefix_size_param, windows.MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER); } + + const suffix_start = aligned_addr + aligned_len; + const suffix_size = (placeholder_addr + overalloc_len) - suffix_start; + if (suffix_size > 0) { + var suffix_base = @as(?*anyopaque, @ptrFromInt(suffix_start)); + var suffix_size_param: windows.SIZE_T = suffix_size; + _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&suffix_base), &suffix_size_param, windows.MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER); + } + + base_addr = @ptrFromInt(aligned_addr); + size = aligned_len; + + status = ntdll.NtAllocateVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), 0, &size, windows.MEM_COMMIT | MEM_PRESERVE_PLACEHOLDER, windows.PAGE_READWRITE); + + if (status == SUCCESS) { + return @ptrCast(base_addr); + } + + base_addr = @as(?*anyopaque, @ptrFromInt(aligned_addr)); + size = aligned_len; + _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), &size, windows.MEM_RELEASE); + + return null; } const aligned_len = mem.alignForward(usize, n, page_size); @@ -104,26 +120,14 @@ fn alloc(context: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[* return map(n, alignment); } -fn resize( - context: *anyopaque, - memory: []u8, - alignment: mem.Alignment, - new_len: usize, - return_address: usize, -) bool { +fn resize(context: *anyopaque, memory: []u8, alignment: mem.Alignment, new_len: usize, return_address: usize) bool { _ = context; _ = alignment; _ = return_address; return realloc(memory, new_len, false) != null; } -fn remap( - context: *anyopaque, - memory: []u8, - alignment: mem.Alignment, - 
new_len: usize, - return_address: usize, -) ?[*]u8 { +fn remap(context: *anyopaque, memory: []u8, alignment: mem.Alignment, new_len: usize, return_address: usize) ?[*]u8 { _ = context; _ = alignment; _ = return_address; @@ -139,7 +143,9 @@ fn free(context: *anyopaque, memory: []u8, alignment: mem.Alignment, return_addr pub fn unmap(memory: []align(page_size_min) u8) void { if (native_os == .windows) { - windows.VirtualFree(memory.ptr, 0, windows.MEM_RELEASE); + var base_addr: ?*anyopaque = memory.ptr; + var region_size: windows.SIZE_T = 0; + _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), &region_size, windows.MEM_RELEASE); } else { const page_aligned_len = mem.alignForward(usize, memory.len, std.heap.pageSize()); posix.munmap(memory.ptr[0..page_aligned_len]); @@ -157,13 +163,10 @@ pub fn realloc(uncasted_memory: []u8, new_len: usize, may_move: bool) ?[*]u8 { const old_addr_end = base_addr + memory.len; const new_addr_end = mem.alignForward(usize, base_addr + new_len, page_size); if (old_addr_end > new_addr_end) { - // For shrinking that is not releasing, we will only decommit - // the pages not needed anymore. 
- windows.VirtualFree( - @ptrFromInt(new_addr_end), - old_addr_end - new_addr_end, - windows.MEM_DECOMMIT, - ); + var decommit_addr: ?*anyopaque = @ptrFromInt(new_addr_end); + var decommit_size: windows.SIZE_T = old_addr_end - new_addr_end; + + _ = ntdll.NtAllocateVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&decommit_addr), 0, &decommit_size, windows.MEM_RESET, windows.PAGE_NOACCESS); } return memory.ptr; } diff --git a/lib/std/io/test.zig b/lib/std/io/test.zig index 6505fcd4facf..523b25c9c8f5 100644 --- a/lib/std/io/test.zig +++ b/lib/std/io/test.zig @@ -108,10 +108,7 @@ test "File seek ops" { const tmp_file_name = "temp_test_file.txt"; var file = try tmp.dir.createFile(tmp_file_name, .{}); - defer { - file.close(); - tmp.dir.deleteFile(tmp_file_name) catch {}; - } + defer file.close(); try file.writeAll(&([_]u8{0x55} ** 8192)); @@ -135,10 +132,7 @@ test "setEndPos" { const tmp_file_name = "temp_test_file.txt"; var file = try tmp.dir.createFile(tmp_file_name, .{}); - defer { - file.close(); - tmp.dir.deleteFile(tmp_file_name) catch {}; - } + defer file.close(); // Verify that the file size changes and the file offset is not moved try std.testing.expect((try file.getEndPos()) == 0); @@ -161,10 +155,8 @@ test "updateTimes" { const tmp_file_name = "just_a_temporary_file.txt"; var file = try tmp.dir.createFile(tmp_file_name, .{ .read = true }); - defer { - file.close(); - tmp.dir.deleteFile(tmp_file_name) catch {}; - } + defer file.close(); + const stat_old = try file.stat(); // Set atime and mtime to 5s before try file.updateTimes( diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig index 7d68322c90a0..0df825187bc1 100644 --- a/lib/std/multi_array_list.zig +++ b/lib/std/multi_array_list.zig @@ -248,8 +248,8 @@ pub fn MultiArrayList(comptime T: type) type { /// Extend the list by 1 element, returning the newly reserved /// index with uninitialized data. /// Allocates more memory as necesasry. 
- pub fn addOne(self: *Self, allocator: Allocator) Allocator.Error!usize { - try self.ensureUnusedCapacity(allocator, 1); + pub fn addOne(self: *Self, gpa: Allocator) Allocator.Error!usize { + try self.ensureUnusedCapacity(gpa, 1); return self.addOneAssumeCapacity(); } diff --git a/lib/std/os/uefi.zig b/lib/std/os/uefi.zig index f7e51c63aab6..9674e704dccb 100644 --- a/lib/std/os/uefi.zig +++ b/lib/std/os/uefi.zig @@ -141,11 +141,10 @@ pub const Time = extern struct { pub const unspecified_timezone: i16 = 0x7ff; fn daysInYear(year: u16, max_month: u4) u9 { - const leap_year: std.time.epoch.YearLeapKind = if (std.time.epoch.isLeapYear(year)) .leap else .not_leap; var days: u9 = 0; var month: u4 = 0; while (month < max_month) : (month += 1) { - days += std.time.epoch.getDaysInMonth(leap_year, @enumFromInt(month + 1)); + days += std.time.epoch.getDaysInMonth(year, @enumFromInt(month + 1)); } return days; } diff --git a/lib/std/os/uefi/status.zig b/lib/std/os/uefi/status.zig index 5c8652019421..2e17ef64c98a 100644 --- a/lib/std/os/uefi/status.zig +++ b/lib/std/os/uefi/status.zig @@ -231,11 +231,57 @@ pub const Status = enum(usize) { else => {}, } } + + pub fn fromError(e: Error) Status { + return switch (e) { + Error.Aborted => .aborted, + Error.AccessDenied => .access_denied, + Error.AlreadyStarted => .already_started, + Error.BadBufferSize => .bad_buffer_size, + Error.BufferTooSmall => .buffer_too_small, + Error.CompromisedData => .compromised_data, + Error.ConnectionFin => .connection_fin, + Error.ConnectionRefused => .connection_refused, + Error.ConnectionReset => .connection_reset, + Error.CrcError => .crc_error, + Error.DeviceError => .device_error, + Error.EndOfFile => .end_of_file, + Error.EndOfMedia => .end_of_media, + Error.HostUnreachable => .host_unreachable, + Error.HttpError => .http_error, + Error.IcmpError => .icmp_error, + Error.IncompatibleVersion => .incompatible_version, + Error.InvalidLanguage => .invalid_language, + Error.InvalidParameter => 
.invalid_parameter, + Error.IpAddressConflict => .ip_address_conflict, + Error.LoadError => .load_error, + Error.MediaChanged => .media_changed, + Error.NetworkUnreachable => .network_unreachable, + Error.NoMapping => .no_mapping, + Error.NoMedia => .no_media, + Error.NoResponse => .no_response, + Error.NotFound => .not_found, + Error.NotReady => .not_ready, + Error.NotStarted => .not_started, + Error.OutOfResources => .out_of_resources, + Error.PortUnreachable => .port_unreachable, + Error.ProtocolError => .protocol_error, + Error.ProtocolUnreachable => .protocol_unreachable, + Error.SecurityViolation => .security_violation, + Error.TftpError => .tftp_error, + Error.Timeout => .timeout, + Error.Unsupported => .unsupported, + Error.VolumeCorrupted => .volume_corrupted, + Error.VolumeFull => .volume_full, + Error.WriteProtected => .write_protected, + }; + } }; test "status" { var st: Status = .device_error; try testing.expectError(error.DeviceError, st.err()); + try testing.expectEqual(st, Status.fromError(st.err())); st = .success; try st.err(); diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig index 563b24cf83c3..c29eb0f05a14 100644 --- a/lib/std/os/windows.zig +++ b/lib/std/os/windows.zig @@ -1758,6 +1758,38 @@ pub fn TerminateProcess(hProcess: HANDLE, uExitCode: UINT) TerminateProcessError } } +pub const NtAllocateVirtualMemoryError = error{ + AccessDenied, + InvalidParameter, + NoMemory, + Unexpected, +}; + +pub fn NtAllocateVirtualMemory(hProcess: HANDLE, addr: ?*PVOID, zero_bits: ULONG_PTR, size: ?*SIZE_T, alloc_type: ULONG, protect: ULONG) NtAllocateVirtualMemoryError!void { + return switch (ntdll.NtAllocateVirtualMemory(hProcess, addr, zero_bits, size, alloc_type, protect)) { + .SUCCESS => return, + .ACCESS_DENIED => NtAllocateVirtualMemoryError.AccessDenied, + .INVALID_PARAMETER => NtAllocateVirtualMemoryError.InvalidParameter, + .NO_MEMORY => NtAllocateVirtualMemoryError.NoMemory, + else => |st| unexpectedStatus(st), + }; +} + +pub const 
NtFreeVirtualMemoryError = error{ + AccessDenied, + InvalidParameter, + Unexpected, +}; + +pub fn NtFreeVirtualMemory(hProcess: HANDLE, addr: ?*PVOID, size: *SIZE_T, free_type: ULONG) NtFreeVirtualMemoryError!void { + return switch (ntdll.NtFreeVirtualMemory(hProcess, addr, size, free_type)) { + .SUCCESS => return, + .ACCESS_DENIED => NtFreeVirtualMemoryError.AccessDenied, + .INVALID_PARAMETER => NtFreeVirtualMemoryError.InvalidParameter, + else => NtFreeVirtualMemoryError.Unexpected, + }; +} + pub const VirtualAllocError = error{Unexpected}; pub fn VirtualAlloc(addr: ?LPVOID, size: usize, alloc_type: DWORD, flProtect: DWORD) VirtualAllocError!LPVOID { @@ -3539,6 +3571,8 @@ pub const MEM_LARGE_PAGES = 0x20000000; pub const MEM_PHYSICAL = 0x400000; pub const MEM_TOP_DOWN = 0x100000; pub const MEM_WRITE_WATCH = 0x200000; +pub const MEM_RESERVE_PLACEHOLDER = 0x00040000; +pub const MEM_PRESERVE_PLACEHOLDER = 0x00000400; // Protect values pub const PAGE_EXECUTE = 0x10; diff --git a/lib/std/os/windows/ntdll.zig b/lib/std/os/windows/ntdll.zig index 490deaeacc5f..474aea3330c7 100644 --- a/lib/std/os/windows/ntdll.zig +++ b/lib/std/os/windows/ntdll.zig @@ -5,6 +5,7 @@ const BOOL = windows.BOOL; const DWORD = windows.DWORD; const DWORD64 = windows.DWORD64; const ULONG = windows.ULONG; +const ULONG_PTR = windows.ULONG_PTR; const NTSTATUS = windows.NTSTATUS; const WORD = windows.WORD; const HANDLE = windows.HANDLE; @@ -358,3 +359,19 @@ pub extern "ntdll" fn NtCreateNamedPipeFile( OutboundQuota: ULONG, DefaultTimeout: *LARGE_INTEGER, ) callconv(.winapi) NTSTATUS; + +pub extern "ntdll" fn NtAllocateVirtualMemory( + ProcessHandle: HANDLE, + BaseAddress: ?*PVOID, + ZeroBits: ULONG_PTR, + RegionSize: ?*SIZE_T, + AllocationType: ULONG, + PageProtection: ULONG, +) callconv(.winapi) NTSTATUS; + +pub extern "ntdll" fn NtFreeVirtualMemory( + ProcessHandle: HANDLE, + BaseAddress: ?*PVOID, + RegionSize: *SIZE_T, + FreeType: ULONG, +) callconv(.winapi) NTSTATUS; diff --git 
a/lib/std/posix/test.zig b/lib/std/posix/test.zig index 29756cae69a7..9678bfb261ce 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -8,7 +8,6 @@ const io = std.io; const fs = std.fs; const mem = std.mem; const elf = std.elf; -const File = std.fs.File; const Thread = std.Thread; const linux = std.os.linux; @@ -19,8 +18,6 @@ const AtomicRmwOp = std.builtin.AtomicRmwOp; const AtomicOrder = std.builtin.AtomicOrder; const native_os = builtin.target.os.tag; const tmpDir = std.testing.tmpDir; -const Dir = std.fs.Dir; -const ArenaAllocator = std.heap.ArenaAllocator; // https://github.com/ziglang/zig/issues/20288 test "WTF-8 to WTF-16 conversion buffer overflows" { @@ -115,50 +112,62 @@ test "open smoke test" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - // Get base abs path - var arena = ArenaAllocator.init(testing.allocator); - defer arena.deinit(); - const allocator = arena.allocator(); + const base_path = try tmp.dir.realpathAlloc(a, "."); + defer a.free(base_path); - const base_path = blk: { - const relative_path = try fs.path.join(allocator, &[_][]const u8{ ".zig-cache", "tmp", tmp.sub_path[0..] }); - break :blk try fs.realpathAlloc(allocator, relative_path); - }; - - var file_path: []u8 = undefined; - var fd: posix.fd_t = undefined; const mode: posix.mode_t = if (native_os == .windows) 0 else 0o666; - // Create some file using `open`. - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); - fd = try posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true, .EXCL = true }, mode); - posix.close(fd); + { + // Create some file using `open`. + const file_path = try fs.path.join(a, &.{ base_path, "some_file" }); + defer a.free(file_path); + const fd = try posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true, .EXCL = true }, mode); + posix.close(fd); + } - // Try this again with the same flags. This op should fail with error.PathAlreadyExists. 
- file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); - try expectError(error.PathAlreadyExists, posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true, .EXCL = true }, mode)); + { + // Try this again with the same flags. This op should fail with error.PathAlreadyExists. + const file_path = try fs.path.join(a, &.{ base_path, "some_file" }); + defer a.free(file_path); + try expectError(error.PathAlreadyExists, posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true, .EXCL = true }, mode)); + } - // Try opening without `EXCL` flag. - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); - fd = try posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true }, mode); - posix.close(fd); + { + // Try opening without `EXCL` flag. + const file_path = try fs.path.join(a, &.{ base_path, "some_file" }); + defer a.free(file_path); + const fd = try posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true }, mode); + posix.close(fd); + } - // Try opening as a directory which should fail. - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); - try expectError(error.NotDir, posix.open(file_path, .{ .ACCMODE = .RDWR, .DIRECTORY = true }, mode)); + { + // Try opening as a directory which should fail. 
+ const file_path = try fs.path.join(a, &.{ base_path, "some_file" }); + defer a.free(file_path); + try expectError(error.NotDir, posix.open(file_path, .{ .ACCMODE = .RDWR, .DIRECTORY = true }, mode)); + } - // Create some directory - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" }); - try posix.mkdir(file_path, mode); + { + // Create some directory + const file_path = try fs.path.join(a, &.{ base_path, "some_dir" }); + defer a.free(file_path); + try posix.mkdir(file_path, mode); + } - // Open dir using `open` - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" }); - fd = try posix.open(file_path, .{ .ACCMODE = .RDONLY, .DIRECTORY = true }, mode); - posix.close(fd); + { + // Open dir using `open` + const file_path = try fs.path.join(a, &.{ base_path, "some_dir" }); + defer a.free(file_path); + const fd = try posix.open(file_path, .{ .ACCMODE = .RDONLY, .DIRECTORY = true }, mode); + posix.close(fd); + } - // Try opening as file which should fail. - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" }); - try expectError(error.IsDir, posix.open(file_path, .{ .ACCMODE = .RDWR }, mode)); + { + // Try opening as file which should fail. 
+ const file_path = try fs.path.join(a, &.{ base_path, "some_dir" }); + defer a.free(file_path); + try expectError(error.IsDir, posix.open(file_path, .{ .ACCMODE = .RDWR }, mode)); + } } test "openat smoke test" { @@ -705,8 +714,6 @@ test "mmap" { try testing.expectEqual(i, try stream.readInt(u32, .little)); } } - - try tmp.dir.deleteFile(test_out_file); } test "getenv" { @@ -732,10 +739,7 @@ test "fcntl" { const test_out_file = "os_tmp_test"; const file = try tmp.dir.createFile(test_out_file, .{}); - defer { - file.close(); - tmp.dir.deleteFile(test_out_file) catch {}; - } + defer file.close(); // Note: The test assumes createFile opens the file with CLOEXEC { @@ -771,10 +775,7 @@ test "sync" { const test_out_file = "os_tmp_test"; const file = try tmp.dir.createFile(test_out_file, .{}); - defer { - file.close(); - tmp.dir.deleteFile(test_out_file) catch {}; - } + defer file.close(); posix.sync(); try posix.syncfs(file.handle); @@ -791,10 +792,7 @@ test "fsync" { const test_out_file = "os_tmp_test"; const file = try tmp.dir.createFile(test_out_file, .{}); - defer { - file.close(); - tmp.dir.deleteFile(test_out_file) catch {}; - } + defer file.close(); try posix.fsync(file.handle); try posix.fdatasync(file.handle); @@ -1041,54 +1039,65 @@ test "rename smoke test" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - // Get base abs path - var arena = ArenaAllocator.init(testing.allocator); - defer arena.deinit(); - const allocator = arena.allocator(); - - const base_path = blk: { - const relative_path = try fs.path.join(allocator, &[_][]const u8{ ".zig-cache", "tmp", tmp.sub_path[0..] }); - break :blk try fs.realpathAlloc(allocator, relative_path); - }; + const base_path = try tmp.dir.realpathAlloc(a, "."); + defer a.free(base_path); - var file_path: []u8 = undefined; - var fd: posix.fd_t = undefined; const mode: posix.mode_t = if (native_os == .windows) 0 else 0o666; - // Create some file using `open`. 
- file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); - fd = try posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true, .EXCL = true }, mode); - posix.close(fd); - - // Rename the file - var new_file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_other_file" }); - try posix.rename(file_path, new_file_path); - - // Try opening renamed file - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_other_file" }); - fd = try posix.open(file_path, .{ .ACCMODE = .RDWR }, mode); - posix.close(fd); + { + // Create some file using `open`. + const file_path = try fs.path.join(a, &.{ base_path, "some_file" }); + defer a.free(file_path); + const fd = try posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true, .EXCL = true }, mode); + posix.close(fd); + + // Rename the file + const new_file_path = try fs.path.join(a, &.{ base_path, "some_other_file" }); + defer a.free(new_file_path); + try posix.rename(file_path, new_file_path); + } - // Try opening original file - should fail with error.FileNotFound - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); - try expectError(error.FileNotFound, posix.open(file_path, .{ .ACCMODE = .RDWR }, mode)); + { + // Try opening renamed file + const file_path = try fs.path.join(a, &.{ base_path, "some_other_file" }); + defer a.free(file_path); + const fd = try posix.open(file_path, .{ .ACCMODE = .RDWR }, mode); + posix.close(fd); + } - // Create some directory - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" }); - try posix.mkdir(file_path, mode); + { + // Try opening original file - should fail with error.FileNotFound + const file_path = try fs.path.join(a, &.{ base_path, "some_file" }); + defer a.free(file_path); + try expectError(error.FileNotFound, posix.open(file_path, .{ .ACCMODE = .RDWR }, mode)); + } - // Rename the directory - new_file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, 
"some_other_dir" }); - try posix.rename(file_path, new_file_path); + { + // Create some directory + const file_path = try fs.path.join(a, &.{ base_path, "some_dir" }); + defer a.free(file_path); + try posix.mkdir(file_path, mode); + + // Rename the directory + const new_file_path = try fs.path.join(a, &.{ base_path, "some_other_dir" }); + defer a.free(new_file_path); + try posix.rename(file_path, new_file_path); + } - // Try opening renamed directory - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_other_dir" }); - fd = try posix.open(file_path, .{ .ACCMODE = .RDONLY, .DIRECTORY = true }, mode); - posix.close(fd); + { + // Try opening renamed directory + const file_path = try fs.path.join(a, &.{ base_path, "some_other_dir" }); + defer a.free(file_path); + const fd = try posix.open(file_path, .{ .ACCMODE = .RDONLY, .DIRECTORY = true }, mode); + posix.close(fd); + } - // Try opening original directory - should fail with error.FileNotFound - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" }); - try expectError(error.FileNotFound, posix.open(file_path, .{ .ACCMODE = .RDONLY, .DIRECTORY = true }, mode)); + { + // Try opening original directory - should fail with error.FileNotFound + const file_path = try fs.path.join(a, &.{ base_path, "some_dir" }); + defer a.free(file_path); + try expectError(error.FileNotFound, posix.open(file_path, .{ .ACCMODE = .RDONLY, .DIRECTORY = true }, mode)); + } } test "access smoke test" { @@ -1098,44 +1107,50 @@ test "access smoke test" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - // Get base abs path - var arena = ArenaAllocator.init(testing.allocator); - defer arena.deinit(); - const allocator = arena.allocator(); - - const base_path = blk: { - const relative_path = try fs.path.join(allocator, &[_][]const u8{ ".zig-cache", "tmp", tmp.sub_path[0..] 
}); - break :blk try fs.realpathAlloc(allocator, relative_path); - }; + const base_path = try tmp.dir.realpathAlloc(a, "."); + defer a.free(base_path); - var file_path: []u8 = undefined; - var fd: posix.fd_t = undefined; const mode: posix.mode_t = if (native_os == .windows) 0 else 0o666; + { + // Create some file using `open`. + const file_path = try fs.path.join(a, &.{ base_path, "some_file" }); + defer a.free(file_path); + const fd = try posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true, .EXCL = true }, mode); + posix.close(fd); + } - // Create some file using `open`. - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); - fd = try posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true, .EXCL = true }, mode); - posix.close(fd); + { + // Try to access() the file + const file_path = try fs.path.join(a, &.{ base_path, "some_file" }); + defer a.free(file_path); + if (native_os == .windows) { + try posix.access(file_path, posix.F_OK); + } else { + try posix.access(file_path, posix.F_OK | posix.W_OK | posix.R_OK); + } + } - // Try to access() the file - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); - if (native_os == .windows) { - try posix.access(file_path, posix.F_OK); - } else { - try posix.access(file_path, posix.F_OK | posix.W_OK | posix.R_OK); + { + // Try to access() a non-existent file - should fail with error.FileNotFound + const file_path = try fs.path.join(a, &.{ base_path, "some_other_file" }); + defer a.free(file_path); + try expectError(error.FileNotFound, posix.access(file_path, posix.F_OK)); } - // Try to access() a non-existent file - should fail with error.FileNotFound - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_other_file" }); - try expectError(error.FileNotFound, posix.access(file_path, posix.F_OK)); + { + // Create some directory + const file_path = try fs.path.join(a, &.{ base_path, "some_dir" }); + defer a.free(file_path); + try 
posix.mkdir(file_path, mode); + } - // Create some directory - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" }); - try posix.mkdir(file_path, mode); + { + // Try to access() the directory + const file_path = try fs.path.join(a, &.{ base_path, "some_dir" }); + defer a.free(file_path); - // Try to access() the directory - file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" }); - try posix.access(file_path, posix.F_OK); + try posix.access(file_path, posix.F_OK); + } } test "timerfd" { @@ -1167,103 +1182,59 @@ test "isatty" { } test "read with empty buffer" { - if (native_os == .wasi) return error.SkipZigTest; - var tmp = tmpDir(.{}); defer tmp.cleanup(); - var arena = ArenaAllocator.init(testing.allocator); - defer arena.deinit(); - const allocator = arena.allocator(); - - // Get base abs path - const base_path = blk: { - const relative_path = try fs.path.join(allocator, &[_][]const u8{ ".zig-cache", "tmp", tmp.sub_path[0..] }); - break :blk try fs.realpathAlloc(allocator, relative_path); - }; - - const file_path: []u8 = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); - var file = try fs.cwd().createFile(file_path, .{ .read = true }); + var file = try tmp.dir.createFile("read_empty", .{ .read = true }); defer file.close(); - const bytes = try allocator.alloc(u8, 0); + const bytes = try a.alloc(u8, 0); + defer a.free(bytes); - _ = try posix.read(file.handle, bytes); + const rc = try posix.read(file.handle, bytes); + try expectEqual(rc, 0); } test "pread with empty buffer" { - if (native_os == .wasi) return error.SkipZigTest; - var tmp = tmpDir(.{}); defer tmp.cleanup(); - var arena = ArenaAllocator.init(testing.allocator); - defer arena.deinit(); - const allocator = arena.allocator(); - - // Get base abs path - const base_path = blk: { - const relative_path = try fs.path.join(allocator, &[_][]const u8{ ".zig-cache", "tmp", tmp.sub_path[0..] 
}); - break :blk try fs.realpathAlloc(allocator, relative_path); - }; - - const file_path: []u8 = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); - var file = try fs.cwd().createFile(file_path, .{ .read = true }); + var file = try tmp.dir.createFile("pread_empty", .{ .read = true }); defer file.close(); - const bytes = try allocator.alloc(u8, 0); + const bytes = try a.alloc(u8, 0); + defer a.free(bytes); - _ = try posix.pread(file.handle, bytes, 0); + const rc = try posix.pread(file.handle, bytes, 0); + try expectEqual(rc, 0); } test "write with empty buffer" { - if (native_os == .wasi) return error.SkipZigTest; - var tmp = tmpDir(.{}); defer tmp.cleanup(); - var arena = ArenaAllocator.init(testing.allocator); - defer arena.deinit(); - const allocator = arena.allocator(); - - // Get base abs path - const base_path = blk: { - const relative_path = try fs.path.join(allocator, &[_][]const u8{ ".zig-cache", "tmp", tmp.sub_path[0..] }); - break :blk try fs.realpathAlloc(allocator, relative_path); - }; - - const file_path: []u8 = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); - var file = try fs.cwd().createFile(file_path, .{}); + var file = try tmp.dir.createFile("write_empty", .{}); defer file.close(); - const bytes = try allocator.alloc(u8, 0); + const bytes = try a.alloc(u8, 0); + defer a.free(bytes); - _ = try posix.write(file.handle, bytes); + const rc = try posix.write(file.handle, bytes); + try expectEqual(rc, 0); } test "pwrite with empty buffer" { - if (native_os == .wasi) return error.SkipZigTest; - var tmp = tmpDir(.{}); defer tmp.cleanup(); - var arena = ArenaAllocator.init(testing.allocator); - defer arena.deinit(); - const allocator = arena.allocator(); - - // Get base abs path - const base_path = blk: { - const relative_path = try fs.path.join(allocator, &[_][]const u8{ ".zig-cache", "tmp", tmp.sub_path[0..] 
}); - break :blk try fs.realpathAlloc(allocator, relative_path); - }; - - const file_path: []u8 = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); - var file = try fs.cwd().createFile(file_path, .{}); + var file = try tmp.dir.createFile("pwrite_empty", .{}); defer file.close(); - const bytes = try allocator.alloc(u8, 0); + const bytes = try a.alloc(u8, 0); + defer a.free(bytes); - _ = try posix.pwrite(file.handle, bytes, 0); + const rc = try posix.pwrite(file.handle, bytes, 0); + try expectEqual(rc, 0); } fn expectMode(dir: posix.fd_t, file: []const u8, mode: posix.mode_t) !void { diff --git a/lib/std/time/epoch.zig b/lib/std/time/epoch.zig index b409d3e9ebf8..fa7499aec7ec 100644 --- a/lib/std/time/epoch.zig +++ b/lib/std/time/epoch.zig @@ -64,8 +64,6 @@ pub fn getDaysInYear(year: Year) u9 { return if (isLeapYear(year)) 366 else 365; } -pub const YearLeapKind = enum(u1) { not_leap, leap }; - pub const Month = enum(u4) { jan = 1, feb, @@ -87,13 +85,13 @@ pub const Month = enum(u4) { } }; -/// Get the number of days in the given month -pub fn getDaysInMonth(leap_year: YearLeapKind, month: Month) u5 { +/// Get the number of days in the given month and year +pub fn getDaysInMonth(year: Year, month: Month) u5 { return switch (month) { .jan => 31, - .feb => @as(u5, switch (leap_year) { - .leap => 29, - .not_leap => 28, + .feb => @as(u5, switch (isLeapYear(year)) { + true => 29, + false => 28, }), .mar => 31, .apr => 30, @@ -116,9 +114,8 @@ pub const YearAndDay = struct { pub fn calculateMonthDay(self: YearAndDay) MonthAndDay { var month: Month = .jan; var days_left = self.day; - const leap_kind: YearLeapKind = if (isLeapYear(self.year)) .leap else .not_leap; while (true) { - const days_in_month = getDaysInMonth(leap_kind, month); + const days_in_month = getDaysInMonth(self.year, month); if (days_left < days_in_month) break; days_left -= days_in_month; diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index 84f9cf7330b4..a4b16d6c46e5 
100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -539,7 +539,7 @@ pub fn abiAndDynamicLinkerFromFile( var result: Target = .{ .cpu = cpu, .os = os, - .abi = query.abi orelse Target.Abi.default(cpu.arch, os), + .abi = query.abi orelse Target.Abi.default(cpu.arch, os.tag), .ofmt = query.ofmt orelse Target.ObjectFormat.default(os.tag, cpu.arch), .dynamic_linker = query.dynamic_linker, }; @@ -1213,7 +1213,7 @@ fn detectAbiAndDynamicLinker( } fn defaultAbiAndDynamicLinker(cpu: Target.Cpu, os: Target.Os, query: Target.Query) Target { - const abi = query.abi orelse Target.Abi.default(cpu.arch, os); + const abi = query.abi orelse Target.Abi.default(cpu.arch, os.tag); return .{ .cpu = cpu, .os = os, diff --git a/src/Compilation.zig b/src/Compilation.zig index 12221ba3dc04..b781a10493c2 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5631,7 +5631,10 @@ pub fn addCCArgs( } if (target_util.llvmMachineAbi(target)) |mabi| { - try argv.append(try std.fmt.allocPrint(arena, "-mabi={s}", .{mabi})); + // Clang's integrated Arm assembler doesn't support `-mabi` yet... + if (!(target.cpu.arch.isArm() and (ext == .assembly or ext == .assembly_with_cpp))) { + try argv.append(try std.fmt.allocPrint(arena, "-mabi={s}", .{mabi})); + } } // We might want to support -mfloat-abi=softfp for Arm and CSKY here in the future. @@ -6026,17 +6029,19 @@ pub fn addCCArgs( // function was called. try argv.append("-fno-sanitize=function"); - // It's recommended to use the minimal runtime in production environments - // due to the security implications of the full runtime. The minimal runtime - // doesn't provide much benefit over simply trapping. if (mod.optimize_mode == .ReleaseSafe) { + // It's recommended to use the minimal runtime in production + // environments due to the security implications of the full runtime. + // The minimal runtime doesn't provide much benefit over simply + // trapping, however, so we do that instead. 
try argv.append("-fsanitize-trap=undefined"); - } - - // This is necessary because, by default, Clang instructs LLVM to embed a COFF link - // dependency on `libclang_rt.ubsan_standalone.a` when the UBSan runtime is used. - if (target.os.tag == .windows) { - try argv.append("-fno-rtlib-defaultlib"); + } else { + // This is necessary because, by default, Clang instructs LLVM to embed + // a COFF link dependency on `libclang_rt.ubsan_standalone.a` when the + // UBSan runtime is used. + if (target.os.tag == .windows) { + try argv.append("-fno-rtlib-defaultlib"); + } } } } diff --git a/src/Sema.zig b/src/Sema.zig index fab08c0d7447..5fa933622e14 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -36756,7 +36756,7 @@ fn unionFields( if (enum_index != field_i) { const msg = msg: { const enum_field_src: LazySrcLoc = .{ - .base_node_inst = tag_info.zir_index.unwrap().?, + .base_node_inst = Type.fromInterned(tag_ty).typeDeclInstAllowGeneratedTag(zcu).?, .offset = .{ .container_field_name = enum_index }, }; const msg = try sema.errMsg(name_src, "union field '{}' ordered differently than corresponding enum field", .{ @@ -38066,6 +38066,11 @@ fn compareScalar( const pt = sema.pt; const coerced_lhs = try pt.getCoerced(lhs, ty); const coerced_rhs = try pt.getCoerced(rhs, ty); + + // Equality comparisons of signed zero and NaN need to use floating point semantics + if (coerced_lhs.isFloat(pt.zcu) or coerced_rhs.isFloat(pt.zcu)) + return Value.compareHeteroSema(coerced_lhs, op, coerced_rhs, pt); + switch (op) { .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty), .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)), diff --git a/src/Value.zig b/src/Value.zig index 384ee43655cc..9ae97676692d 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -1142,6 +1142,8 @@ pub fn compareHeteroAdvanced( else => {}, } } + + if (lhs.isNan(zcu) or rhs.isNan(zcu)) return op == .neq; return (try orderAdvanced(lhs, rhs, strat, zcu, tid)).compare(op); } diff --git 
a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 4af9b0125707..9141646e4dce 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2437,7 +2437,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { try cg.airArg(inst); - try cg.resetTemps(); + try cg.resetTemps(@enumFromInt(0)); cg.checkInvariantsAfterAirInst(); }, else => break, @@ -2477,7 +2477,6 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { .shuffle => try cg.airShuffle(inst), .reduce => try cg.airReduce(inst), .reduce_optimized => try cg.airReduce(inst), - .aggregate_init => try cg.airAggregateInit(inst), // zig fmt: on .arg => if (cg.debug_output != .none) { @@ -80843,6 +80842,74 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { for (ops[1..]) |op| try op.die(cg); try res[0].finish(inst, &.{ty_op.operand}, ops[0..1], cg); }, + .aggregate_init => |air_tag| if (use_old) try cg.airAggregateInit(inst) else fallback: { + const ty_pl = air_datas[@intFromEnum(inst)].ty_pl; + const agg_ty = ty_pl.ty.toType(); + if ((agg_ty.isVector(zcu) and agg_ty.childType(zcu).toIntern() == .bool_type) or + (agg_ty.zigTypeTag(zcu) == .@"struct" and agg_ty.containerLayout(zcu) == .@"packed")) break :fallback try cg.airAggregateInit(inst); + var res = try cg.tempAllocMem(agg_ty); + const reset_index = cg.next_temp_index; + var bt = cg.liveness.iterateBigTomb(inst); + switch (ip.indexToKey(agg_ty.toIntern())) { + inline .array_type, .vector_type => |sequence_type| { + const elems: []const Air.Inst.Ref = @ptrCast(cg.air.extra[ty_pl.payload..][0..@intCast(sequence_type.len)]); + const elem_size = Type.fromInterned(sequence_type.child).abiSize(zcu); + var elem_disp: u31 = 0; + for (elems) |elem_ref| { + var elem = try cg.tempFromOperand(elem_ref, bt.feed()); + try res.write(&elem, .{ .disp = elem_disp }, cg); + try elem.die(cg); + try cg.resetTemps(reset_index); + elem_disp += @intCast(elem_size); + } + if 
(@hasField(@TypeOf(sequence_type), "sentinel") and sequence_type.sentinel != .none) { + var sentinel = try cg.tempFromValue(.fromInterned(sequence_type.sentinel)); + try res.write(&sentinel, .{ .disp = elem_disp }, cg); + try sentinel.die(cg); + } + }, + .struct_type => { + const loaded_struct = ip.loadStructType(agg_ty.toIntern()); + const elems: []const Air.Inst.Ref = @ptrCast(cg.air.extra[ty_pl.payload..][0..loaded_struct.field_types.len]); + switch (loaded_struct.layout) { + .auto, .@"extern" => { + for (elems, 0..) |elem_ref, field_index| { + const elem_dies = bt.feed(); + if (loaded_struct.fieldIsComptime(ip, field_index)) continue; + var elem = try cg.tempFromOperand(elem_ref, elem_dies); + try res.write(&elem, .{ .disp = @intCast(loaded_struct.offsets.get(ip)[field_index]) }, cg); + try elem.die(cg); + try cg.resetTemps(reset_index); + } + }, + .@"packed" => return cg.fail("failed to select {s} {}", .{ + @tagName(air_tag), + agg_ty.fmt(pt), + }), + } + }, + .tuple_type => |tuple_type| { + const elems: []const Air.Inst.Ref = @ptrCast(cg.air.extra[ty_pl.payload..][0..tuple_type.types.len]); + var elem_disp: u31 = 0; + for (elems, 0..) 
|elem_ref, field_index| { + const elem_dies = bt.feed(); + if (tuple_type.values.get(ip)[field_index] != .none) continue; + const field_type = Type.fromInterned(tuple_type.types.get(ip)[field_index]); + elem_disp = @intCast(field_type.abiAlignment(zcu).forward(elem_disp)); + var elem = try cg.tempFromOperand(elem_ref, elem_dies); + try res.write(&elem, .{ .disp = elem_disp }, cg); + try elem.die(cg); + try cg.resetTemps(reset_index); + elem_disp += @intCast(field_type.abiSize(zcu)); + } + }, + else => return cg.fail("failed to select {s} {}", .{ + @tagName(air_tag), + agg_ty.fmt(pt), + }), + } + try res.finish(inst, &.{}, &.{}, cg); + }, .union_init => if (use_old) try cg.airUnionInit(inst) else { const ty_pl = air_datas[@intFromEnum(inst)].ty_pl; const extra = cg.air.extraData(Air.UnionInit, ty_pl.payload).data; @@ -82199,14 +82266,14 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { .c_va_start => try cg.airVaStart(inst), .work_item_id, .work_group_size, .work_group_id => unreachable, } - try cg.resetTemps(); + try cg.resetTemps(@enumFromInt(0)); cg.checkInvariantsAfterAirInst(); } verbose_tracking_log.debug("{}", .{cg.fmtTracking()}); } -fn genLazy(self: *CodeGen, lazy_sym: link.File.LazySymbol) InnerError!void { - const pt = self.pt; +fn genLazy(cg: *CodeGen, lazy_sym: link.File.LazySymbol) InnerError!void { + const pt = cg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; switch (ip.indexToKey(lazy_sym.ty)) { @@ -82215,97 +82282,98 @@ fn genLazy(self: *CodeGen, lazy_sym: link.File.LazySymbol) InnerError!void { wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)}); const param_regs = abi.getCAbiIntParamRegs(.auto); - const param_locks = self.register_manager.lockRegsAssumeUnused(2, param_regs[0..2].*); - defer for (param_locks) |lock| self.register_manager.unlockReg(lock); + const param_locks = cg.register_manager.lockRegsAssumeUnused(2, param_regs[0..2].*); + defer for (param_locks) |lock| cg.register_manager.unlockReg(lock); const 
ret_mcv: MCValue = .{ .register_pair = param_regs[0..2].* }; - const enum_mcv: MCValue = .{ .register = param_regs[0] }; + var enum_temp = try cg.tempInit(enum_ty, .{ .register = param_regs[0] }); - const data_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp); - const data_lock = self.register_manager.lockRegAssumeUnused(data_reg); - defer self.register_manager.unlockReg(data_lock); - try self.genLazySymbolRef(.lea, data_reg, .{ .kind = .const_data, .ty = lazy_sym.ty }); + const data_reg = try cg.register_manager.allocReg(null, abi.RegisterClass.gp); + const data_lock = cg.register_manager.lockRegAssumeUnused(data_reg); + defer cg.register_manager.unlockReg(data_lock); + try cg.genLazySymbolRef(.lea, data_reg, .{ .kind = .const_data, .ty = lazy_sym.ty }); var data_off: i32 = 0; + const reset_index = cg.next_temp_index; const tag_names = ip.loadEnumType(lazy_sym.ty).names; for (0..tag_names.len) |tag_index| { - var enum_temp = try self.tempInit(enum_ty, enum_mcv); - const tag_name_len = tag_names.get(ip)[tag_index].length(ip); - var tag_temp = try self.tempFromValue(try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index))); - const cc_temp = enum_temp.cmpInts(.neq, &tag_temp, self) catch |err| switch (err) { + var tag_temp = try cg.tempFromValue(try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index))); + const cc_temp = enum_temp.cmpInts(.neq, &tag_temp, cg) catch |err| switch (err) { error.SelectFailed => unreachable, else => |e| return e, }; - try enum_temp.die(self); - try tag_temp.die(self); - const skip_reloc = try self.asmJccReloc(cc_temp.tracking(self).short.eflags, undefined); - try cc_temp.die(self); - try self.resetTemps(); + try tag_temp.die(cg); + const skip_reloc = try cg.asmJccReloc(cc_temp.tracking(cg).short.eflags, undefined); + try cc_temp.die(cg); + try cg.resetTemps(reset_index); - try self.genSetReg( + try cg.genSetReg( ret_mcv.register_pair[0], .usize, .{ .register_offset = .{ .reg = data_reg, .off = data_off } }, .{}, ); - 
try self.genSetReg(ret_mcv.register_pair[1], .usize, .{ .immediate = tag_name_len }, .{}); - try self.asmOpOnly(.{ ._, .ret }); + try cg.genSetReg(ret_mcv.register_pair[1], .usize, .{ .immediate = tag_name_len }, .{}); + try cg.asmOpOnly(.{ ._, .ret }); - self.performReloc(skip_reloc); + cg.performReloc(skip_reloc); data_off += @intCast(tag_name_len + 1); } + try enum_temp.die(cg); - try self.genSetReg(ret_mcv.register_pair[0], .usize, .{ .immediate = 0 }, .{}); - try self.asmOpOnly(.{ ._, .ret }); + try cg.genSetReg(ret_mcv.register_pair[0], .usize, .{ .immediate = 0 }, .{}); + try cg.asmOpOnly(.{ ._, .ret }); }, .error_set_type => |error_set_type| { const err_ty: Type = .fromInterned(lazy_sym.ty); wip_mir_log.debug("{}.@errorCast:", .{err_ty.fmt(pt)}); const param_regs = abi.getCAbiIntParamRegs(.auto); - const param_locks = self.register_manager.lockRegsAssumeUnused(2, param_regs[0..2].*); - defer for (param_locks) |lock| self.register_manager.unlockReg(lock); + const param_locks = cg.register_manager.lockRegsAssumeUnused(2, param_regs[0..2].*); + defer for (param_locks) |lock| cg.register_manager.unlockReg(lock); const ret_mcv: MCValue = .{ .register = param_regs[0] }; const err_mcv: MCValue = .{ .register = param_regs[0] }; + var err_temp = try cg.tempInit(err_ty, err_mcv); const ExpectedContents = [32]Mir.Inst.Index; var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + std.heap.stackFallback(@sizeOf(ExpectedContents), cg.gpa); const allocator = stack.get(); const relocs = try allocator.alloc(Mir.Inst.Index, error_set_type.names.len); defer allocator.free(relocs); + const reset_index = cg.next_temp_index; for (0.., relocs) |tag_index, *reloc| { - var err_temp = try self.tempInit(err_ty, err_mcv); - - var tag_temp = try self.tempInit(.anyerror, .{ + var tag_temp = try cg.tempInit(.anyerror, .{ .immediate = 
ip.getErrorValueIfExists(error_set_type.names.get(ip)[tag_index]).?, }); - const cc_temp = err_temp.cmpInts(.eq, &tag_temp, self) catch |err| switch (err) { + const cc_temp = err_temp.cmpInts(.eq, &tag_temp, cg) catch |err| switch (err) { error.SelectFailed => unreachable, else => |e| return e, }; - try err_temp.die(self); - try tag_temp.die(self); - reloc.* = try self.asmJccReloc(cc_temp.tracking(self).short.eflags, undefined); - try cc_temp.die(self); - try self.resetTemps(); + try tag_temp.die(cg); + reloc.* = try cg.asmJccReloc(cc_temp.tracking(cg).short.eflags, undefined); + try cc_temp.die(cg); + try cg.resetTemps(reset_index); } + try err_temp.die(cg); - try self.genCopy(.usize, ret_mcv, .{ .immediate = 0 }, .{}); - for (relocs) |reloc| self.performReloc(reloc); + try cg.genCopy(.usize, ret_mcv, .{ .immediate = 0 }, .{}); + for (relocs) |reloc| cg.performReloc(reloc); assert(ret_mcv.register == err_mcv.register); - try self.asmOpOnly(.{ ._, .ret }); + try cg.asmOpOnly(.{ ._, .ret }); }, - else => return self.fail( + else => return cg.fail( "TODO implement {s} for {}", .{ @tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt) }, ), } + try cg.resetTemps(@enumFromInt(0)); + cg.checkInvariantsAfterAirInst(); } fn getValue(self: *CodeGen, value: MCValue, inst: ?Air.Inst.Index) !void { @@ -93621,17 +93689,17 @@ fn lowerBlock(self: *CodeGen, inst: Air.Inst.Index, body: []const Air.Inst.Index } fn lowerSwitchBr( - self: *CodeGen, + cg: *CodeGen, inst: Air.Inst.Index, switch_br: Air.UnwrappedSwitch, condition: MCValue, condition_dies: bool, is_loop: bool, ) !void { - const zcu = self.pt.zcu; - const condition_ty = self.typeOf(switch_br.operand); - const condition_int_info = self.intInfo(condition_ty).?; - const condition_int_ty = try self.pt.intType(condition_int_info.signedness, condition_int_info.bits); + const zcu = cg.pt.zcu; + const condition_ty = cg.typeOf(switch_br.operand); + const condition_int_info = cg.intInfo(condition_ty).?; + const 
condition_int_ty = try cg.pt.intType(condition_int_info.signedness, condition_int_info.bits); const ExpectedContents = extern struct { liveness_deaths: [1 << 8 | 1]Air.Inst.Index, @@ -93639,15 +93707,15 @@ fn lowerSwitchBr( relocs: [1 << 6]Mir.Inst.Index, }; var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + std.heap.stackFallback(@sizeOf(ExpectedContents), cg.gpa); const allocator = stack.get(); - const state = try self.saveState(); + const state = try cg.saveState(); - const liveness = try self.liveness.getSwitchBr(allocator, inst, switch_br.cases_len + 1); + const liveness = try cg.liveness.getSwitchBr(allocator, inst, switch_br.cases_len + 1); defer allocator.free(liveness.deaths); - if (!self.mod.pic and self.target.ofmt == .elf) table: { + if (!cg.mod.pic and cg.target.ofmt == .elf) table: { var prong_items: u32 = 0; var min: ?Value = null; var max: ?Value = null; @@ -93690,41 +93758,41 @@ fn lowerSwitchBr( if (prong_items < table_len >> 2) break :table; // no more than 75% waste const condition_index = if (condition_dies and condition.isModifiable()) condition else condition_index: { - const condition_index = try self.allocTempRegOrMem(condition_ty, true); - try self.genCopy(condition_ty, condition_index, condition, .{}); + const condition_index = try cg.allocTempRegOrMem(condition_ty, true); + try cg.genCopy(condition_ty, condition_index, condition, .{}); break :condition_index condition_index; }; - try self.spillEflagsIfOccupied(); - if (min.?.orderAgainstZero(zcu).compare(.neq)) try self.genBinOpMir( + try cg.spillEflagsIfOccupied(); + if (min.?.orderAgainstZero(zcu).compare(.neq)) try cg.genBinOpMir( .{ ._, .sub }, condition_ty, condition_index, .{ .air_ref = Air.internedToRef(min.?.toIntern()) }, ); const else_reloc = if (switch_br.else_body_len > 0) else_reloc: { - var cond_temp = try self.tempInit(condition_ty, condition_index); - var 
table_max_temp = try self.tempFromValue(try self.pt.intValue(condition_int_ty, table_len - 1)); - const cc_temp = cond_temp.cmpInts(.gt, &table_max_temp, self) catch |err| switch (err) { + var cond_temp = try cg.tempInit(condition_ty, condition_index); + var table_max_temp = try cg.tempFromValue(try cg.pt.intValue(condition_int_ty, table_len - 1)); + const cc_temp = cond_temp.cmpInts(.gt, &table_max_temp, cg) catch |err| switch (err) { error.SelectFailed => unreachable, else => |e| return e, }; - try cond_temp.die(self); - try table_max_temp.die(self); - const else_reloc = try self.asmJccReloc(cc_temp.tracking(self).short.eflags, undefined); - try cc_temp.die(self); + try cond_temp.die(cg); + try table_max_temp.die(cg); + const else_reloc = try cg.asmJccReloc(cc_temp.tracking(cg).short.eflags, undefined); + try cc_temp.die(cg); break :else_reloc else_reloc; } else undefined; - const table_start: u31 = @intCast(self.mir_table.items.len); + const table_start: u31 = @intCast(cg.mir_table.items.len); { const condition_index_reg = if (condition_index.isRegister()) condition_index.getReg().? 
else - try self.copyToTmpRegister(.usize, condition_index); - const condition_index_lock = self.register_manager.lockReg(condition_index_reg); - defer if (condition_index_lock) |lock| self.register_manager.unlockReg(lock); - try self.truncateRegister(condition_ty, condition_index_reg); - const ptr_size = @divExact(self.target.ptrBitWidth(), 8); - try self.asmMemory(.{ ._mp, .j }, .{ + try cg.copyToTmpRegister(.usize, condition_index); + const condition_index_lock = cg.register_manager.lockReg(condition_index_reg); + defer if (condition_index_lock) |lock| cg.register_manager.unlockReg(lock); + try cg.truncateRegister(condition_ty, condition_index_reg); + const ptr_size = @divExact(cg.target.ptrBitWidth(), 8); + try cg.asmMemory(.{ ._mp, .j }, .{ .base = .table, .mod = .{ .rm = .{ .size = .ptr, @@ -93735,32 +93803,32 @@ fn lowerSwitchBr( }); } const else_reloc_marker: u32 = 0; - assert(self.mir_instructions.len > else_reloc_marker); - try self.mir_table.appendNTimes(self.gpa, else_reloc_marker, table_len); - if (is_loop) try self.loop_switches.putNoClobber(self.gpa, inst, .{ + assert(cg.mir_instructions.len > else_reloc_marker); + try cg.mir_table.appendNTimes(cg.gpa, else_reloc_marker, table_len); + if (is_loop) try cg.loop_switches.putNoClobber(cg.gpa, inst, .{ .start = table_start, .len = table_len, .min = min.?, .else_relocs = if (switch_br.else_body_len > 0) .{ .forward = .empty } else .@"unreachable", }); defer if (is_loop) { - var loop_switch_data = self.loop_switches.fetchRemove(inst).?.value; + var loop_switch_data = cg.loop_switches.fetchRemove(inst).?.value; switch (loop_switch_data.else_relocs) { .@"unreachable", .backward => {}, - .forward => |*else_relocs| else_relocs.deinit(self.gpa), + .forward => |*else_relocs| else_relocs.deinit(cg.gpa), } }; var cases_it = switch_br.iterateCases(); while (cases_it.next()) |case| { { - const table = self.mir_table.items[table_start..][0..table_len]; + const table = cg.mir_table.items[table_start..][0..table_len]; 
for (case.items) |item| { const val = Value.fromInterned(item.toInterned().?); var val_space: Value.BigIntSpace = undefined; const val_bigint = val.toBigInt(&val_space, zcu); var index_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined }; index_bigint.sub(val_bigint, min_bigint); - table[index_bigint.toConst().to(u10) catch unreachable] = @intCast(self.mir_instructions.len); + table[index_bigint.toConst().to(u10) catch unreachable] = @intCast(cg.mir_instructions.len); } for (case.ranges) |range| { var low_space: Value.BigIntSpace = undefined; @@ -93772,14 +93840,14 @@ fn lowerSwitchBr( const start = index_bigint.toConst().to(u10) catch unreachable; index_bigint.sub(high_bigint, min_bigint); const end = @as(u11, index_bigint.toConst().to(u10) catch unreachable) + 1; - @memset(table[start..end], @intCast(self.mir_instructions.len)); + @memset(table[start..end], @intCast(cg.mir_instructions.len)); } } - for (liveness.deaths[case.idx]) |operand| try self.processDeath(operand); + for (liveness.deaths[case.idx]) |operand| try cg.processDeath(operand); - try self.genBodyBlock(case.body); - try self.restoreState(state, &.{}, .{ + try cg.genBodyBlock(case.body); + try cg.restoreState(state, &.{}, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, @@ -93790,21 +93858,21 @@ fn lowerSwitchBr( const else_body = cases_it.elseBody(); const else_deaths = liveness.deaths.len - 1; - for (liveness.deaths[else_deaths]) |operand| try self.processDeath(operand); + for (liveness.deaths[else_deaths]) |operand| try cg.processDeath(operand); - self.performReloc(else_reloc); + cg.performReloc(else_reloc); if (is_loop) { - const loop_switch_data = self.loop_switches.getPtr(inst).?; - for (loop_switch_data.else_relocs.forward.items) |reloc| self.performReloc(reloc); - loop_switch_data.else_relocs.forward.deinit(self.gpa); - loop_switch_data.else_relocs = .{ .backward = @intCast(self.mir_instructions.len) }; + const 
loop_switch_data = cg.loop_switches.getPtr(inst).?; + for (loop_switch_data.else_relocs.forward.items) |reloc| cg.performReloc(reloc); + loop_switch_data.else_relocs.forward.deinit(cg.gpa); + loop_switch_data.else_relocs = .{ .backward = @intCast(cg.mir_instructions.len) }; } - for (self.mir_table.items[table_start..][0..table_len]) |*entry| if (entry.* == else_reloc_marker) { - entry.* = @intCast(self.mir_instructions.len); + for (cg.mir_table.items[table_start..][0..table_len]) |*entry| if (entry.* == else_reloc_marker) { + entry.* = @intCast(cg.mir_instructions.len); }; - try self.genBodyBlock(else_body); - try self.restoreState(state, &.{}, .{ + try cg.genBodyBlock(else_body); + try cg.restoreState(state, &.{}, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, @@ -93819,9 +93887,12 @@ fn lowerSwitchBr( const relocs = try allocator.alloc(Mir.Inst.Index, case.items.len + case.ranges.len); defer allocator.free(relocs); - try self.spillEflagsIfOccupied(); + var cond_temp = try cg.tempInit(condition_ty, condition); + const reset_index = cg.next_temp_index; + + try cg.spillEflagsIfOccupied(); for (case.items, relocs[0..case.items.len]) |item, *reloc| { - const item_mcv = try self.resolveInst(item); + const item_mcv = try cg.resolveInst(item); const cc: Condition = switch (condition) { .eflags => |cc| switch (item_mcv.immediate) { 0 => cc.negate(), @@ -93829,27 +93900,24 @@ fn lowerSwitchBr( else => unreachable, }, else => cc: { - var cond_temp = try self.tempInit(condition_ty, condition); - var item_temp = try self.tempInit(condition_ty, item_mcv); - const cc_temp = cond_temp.cmpInts(.eq, &item_temp, self) catch |err| switch (err) { + var item_temp = try cg.tempInit(condition_ty, item_mcv); + const cc_temp = cond_temp.cmpInts(.eq, &item_temp, cg) catch |err| switch (err) { error.SelectFailed => unreachable, else => |e| return e, }; - try cond_temp.die(self); - try item_temp.die(self); - const cc = cc_temp.tracking(self).short.eflags; - try 
cc_temp.die(self); - try self.resetTemps(); + try item_temp.die(cg); + const cc = cc_temp.tracking(cg).short.eflags; + try cc_temp.die(cg); + try cg.resetTemps(reset_index); break :cc cc; }, }; - reloc.* = try self.asmJccReloc(cc, undefined); + reloc.* = try cg.asmJccReloc(cc, undefined); } for (case.ranges, relocs[case.items.len..]) |range, *reloc| { - var cond_temp = try self.tempInit(condition_ty, condition); - const min_mcv = try self.resolveInst(range[0]); - const max_mcv = try self.resolveInst(range[1]); + const min_mcv = try cg.resolveInst(range[0]); + const max_mcv = try cg.resolveInst(range[1]); // `null` means always false. const lt_min = cc: switch (condition) { .eflags => |cc| switch (min_mcv.immediate) { @@ -93858,19 +93926,19 @@ fn lowerSwitchBr( else => unreachable, }, else => { - var min_temp = try self.tempInit(condition_ty, min_mcv); - const cc_temp = cond_temp.cmpInts(.lt, &min_temp, self) catch |err| switch (err) { + var min_temp = try cg.tempInit(condition_ty, min_mcv); + const cc_temp = cond_temp.cmpInts(.lt, &min_temp, cg) catch |err| switch (err) { error.SelectFailed => unreachable, else => |e| return e, }; - try min_temp.die(self); - const cc = cc_temp.tracking(self).short.eflags; - try cc_temp.die(self); + try min_temp.die(cg); + const cc = cc_temp.tracking(cg).short.eflags; + try cc_temp.die(cg); break :cc cc; }, }; const lt_min_reloc = if (lt_min) |cc| r: { - break :r try self.asmJccReloc(cc, undefined); + break :r try cg.asmJccReloc(cc, undefined); } else null; // `null` means always true. 
const lte_max = switch (condition) { @@ -93880,38 +93948,41 @@ fn lowerSwitchBr( else => unreachable, }, else => cc: { - var max_temp = try self.tempInit(condition_ty, max_mcv); - const cc_temp = cond_temp.cmpInts(.lte, &max_temp, self) catch |err| switch (err) { + var max_temp = try cg.tempInit(condition_ty, max_mcv); + const cc_temp = cond_temp.cmpInts(.lte, &max_temp, cg) catch |err| switch (err) { error.SelectFailed => unreachable, else => |e| return e, }; - try max_temp.die(self); - const cc = cc_temp.tracking(self).short.eflags; - try cc_temp.die(self); + try max_temp.die(cg); + const cc = cc_temp.tracking(cg).short.eflags; + try cc_temp.die(cg); break :cc cc; }, }; - try cond_temp.die(self); - try self.resetTemps(); + try cg.resetTemps(reset_index); // "Success" case is in `reloc`.... if (lte_max) |cc| { - reloc.* = try self.asmJccReloc(cc, undefined); + reloc.* = try cg.asmJccReloc(cc, undefined); } else { - reloc.* = try self.asmJmpReloc(undefined); + reloc.* = try cg.asmJmpReloc(undefined); } // ...and "fail" case falls through to next checks. - if (lt_min_reloc) |r| self.performReloc(r); + if (lt_min_reloc) |r| cg.performReloc(r); } + try cond_temp.die(cg); + try cg.resetTemps(@enumFromInt(0)); + cg.checkInvariantsAfterAirInst(); + // The jump to skip this case if the conditions all failed. - const skip_case_reloc = try self.asmJmpReloc(undefined); + const skip_case_reloc = try cg.asmJmpReloc(undefined); - for (liveness.deaths[case.idx]) |operand| try self.processDeath(operand); + for (liveness.deaths[case.idx]) |operand| try cg.processDeath(operand); // Relocate all success cases to the body we're about to generate. 
- for (relocs) |reloc| self.performReloc(reloc); - try self.genBodyBlock(case.body); - try self.restoreState(state, &.{}, .{ + for (relocs) |reloc| cg.performReloc(reloc); + try cg.genBodyBlock(case.body); + try cg.restoreState(state, &.{}, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, @@ -93919,16 +93990,16 @@ fn lowerSwitchBr( }); // Relocate the "skip" branch to fall through to the next case. - self.performReloc(skip_case_reloc); + cg.performReloc(skip_case_reloc); } if (switch_br.else_body_len > 0) { const else_body = cases_it.elseBody(); const else_deaths = liveness.deaths.len - 1; - for (liveness.deaths[else_deaths]) |operand| try self.processDeath(operand); + for (liveness.deaths[else_deaths]) |operand| try cg.processDeath(operand); - try self.genBodyBlock(else_body); - try self.restoreState(state, &.{}, .{ + try cg.genBodyBlock(else_body); + try cg.restoreState(state, &.{}, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, @@ -95003,7 +95074,7 @@ fn moveStrategy(cg: *CodeGen, ty: Type, class: Register.Class, aligned: bool) !M .mmx => {}, .sse => switch (ty.zigTypeTag(zcu)) { else => { - const classes = std.mem.sliceTo(&abi.classifySystemV(ty, zcu, cg.target.*, .other), .none); + const classes = std.mem.sliceTo(&abi.classifySystemV(ty, zcu, cg.target, .other), .none); assert(std.mem.indexOfNone(abi.Class, classes, &.{ .integer, .sse, .sseup, .memory, .float, .float_combine, }) == null); @@ -99635,7 +99706,7 @@ fn airVaArg(self: *CodeGen, inst: Air.Inst.Index) !void { const overflow_arg_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 8 } }; const reg_save_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 16 } }; - const classes = std.mem.sliceTo(&abi.classifySystemV(promote_ty, zcu, self.target.*, .arg), .none); + const classes = std.mem.sliceTo(&abi.classifySystemV(promote_ty, zcu, self.target, .arg), .none); switch (classes[0]) { .integer => { assert(classes.len == 1); 
@@ -99980,7 +100051,7 @@ fn resolveCallingConventionValues( var ret_tracking_i: usize = 0; const classes = switch (cc) { - .x86_64_sysv => std.mem.sliceTo(&abi.classifySystemV(ret_ty, zcu, self.target.*, .ret), .none), + .x86_64_sysv => std.mem.sliceTo(&abi.classifySystemV(ret_ty, zcu, self.target, .ret), .none), .x86_64_win => &.{abi.classifyWindows(ret_ty, zcu)}, else => unreachable, }; @@ -100069,7 +100140,7 @@ fn resolveCallingConventionValues( var arg_mcv_i: usize = 0; const classes = switch (cc) { - .x86_64_sysv => std.mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .arg), .none), + .x86_64_sysv => std.mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target, .arg), .none), .x86_64_win => &.{abi.classifyWindows(ty, zcu)}, else => unreachable, }; @@ -100373,7 +100444,7 @@ fn splitType(self: *CodeGen, comptime parts_len: usize, ty: Type) ![parts_len]Ty error.DivisionByZero => unreachable, error.UnexpectedRemainder => {}, }; - const classes = std.mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .other), .none); + const classes = std.mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target, .other), .none); if (classes.len == parts_len) for (&parts, classes, 0..) 
|*part, class, part_i| { part.* = switch (class) { .integer => if (part_i < parts_len - 1) @@ -101339,6 +101410,7 @@ const Temp = struct { const val_mcv = val.tracking(cg).short; switch (val_mcv) { else => |mcv| std.debug.panic("{s}: {}\n", .{ @src().fn_name, mcv }), + .none => {}, .undef => if (opts.safe) { var dst_ptr = try cg.tempInit(.usize, dst.tracking(cg).short.address().offset(opts.disp)); var pat = try cg.tempInit(.u8, .{ .immediate = 0xaa }); @@ -101371,19 +101443,19 @@ const Temp = struct { .disp = opts.disp, }), ), - .register => |val_reg| try dst.writeRegs(opts.disp, val_ty, &.{registerAlias( + .register => |val_reg| try dst.writeReg(opts.disp, val_ty, registerAlias( val_reg, @intCast(val_ty.abiSize(cg.pt.zcu)), - )}, cg), + ), cg), inline .register_pair, .register_triple, .register_quadruple, => |val_regs| try dst.writeRegs(opts.disp, val_ty, &val_regs, cg), .register_offset => |val_reg_off| switch (val_reg_off.off) { - 0 => try dst.writeRegs(opts.disp, val_ty, &.{registerAlias( + 0 => try dst.writeReg(opts.disp, val_ty, registerAlias( val_reg_off.reg, @intCast(val_ty.abiSize(cg.pt.zcu)), - )}, cg), + ), cg), else => continue :val_to_gpr, }, .register_overflow => |val_reg_ov| { @@ -101401,7 +101473,7 @@ const Temp = struct { else => std.debug.panic("{s}: {}\n", .{ @src().fn_name, val_ty.fmt(cg.pt) }), }); const first_size: u31 = @intCast(first_ty.abiSize(cg.pt.zcu)); - try dst.writeRegs(opts.disp, first_ty, &.{registerAlias(val_reg_ov.reg, first_size)}, cg); + try dst.writeReg(opts.disp, first_ty, registerAlias(val_reg_ov.reg, first_size), cg); try cg.asmSetccMemory( val_reg_ov.eflags, try dst.tracking(cg).short.mem(cg, .{ @@ -101492,42 +101564,76 @@ const Temp = struct { })); } + fn writeReg(dst: Temp, disp: i32, src_ty: Type, src_reg: Register, cg: *CodeGen) InnerError!void { + const src_abi_size: u31 = @intCast(src_ty.abiSize(cg.pt.zcu)); + const src_rc = src_reg.class(); + if (src_rc == .x87 or std.math.isPowerOfTwo(src_abi_size)) { + const strat 
= try cg.moveStrategy(src_ty, src_rc, false); + try strat.write(cg, try dst.tracking(cg).short.mem(cg, .{ + .size = .fromBitSize(@min(8 * src_abi_size, src_reg.bitSize())), + .disp = disp, + }), registerAlias(src_reg, src_abi_size)); + } else { + const frame_size = std.math.ceilPowerOfTwoAssert(u32, src_abi_size); + const frame_index = try cg.allocFrameIndex(.init(.{ + .size = frame_size, + .alignment = .fromNonzeroByteUnits(frame_size), + })); + const strat = try cg.moveStrategy(src_ty, src_rc, true); + try strat.write(cg, .{ + .base = .{ .frame = frame_index }, + .mod = .{ .rm = .{ .size = .fromSize(frame_size) } }, + }, registerAlias(src_reg, frame_size)); + var dst_ptr = try cg.tempInit(.usize, dst.tracking(cg).short.address()); + try dst_ptr.toOffset(disp, cg); + var src_ptr = try cg.tempInit(.usize, .{ .lea_frame = .{ .index = frame_index } }); + var len = try cg.tempInit(.usize, .{ .immediate = src_abi_size }); + try dst_ptr.memcpy(&src_ptr, &len, cg); + try dst_ptr.die(cg); + try src_ptr.die(cg); + try len.die(cg); + } + } + fn writeRegs(dst: Temp, disp: i32, src_ty: Type, src_regs: []const Register, cg: *CodeGen) InnerError!void { + const zcu = cg.pt.zcu; + const classes = std.mem.sliceTo(&abi.classifySystemV(src_ty, zcu, cg.target, .other), .none); + var next_class_index: u4 = 0; var part_disp = disp; - var src_abi_size: u32 = @intCast(src_ty.abiSize(cg.pt.zcu)); + var remaining_abi_size = src_ty.abiSize(zcu); for (src_regs) |src_reg| { - const src_rc = src_reg.class(); - const part_bit_size = @min(8 * src_abi_size, src_reg.bitSize()); - const part_size = @divExact(part_bit_size, 8); - if (src_rc == .x87 or std.math.isPowerOfTwo(part_size)) { - const strat = try cg.moveStrategy(src_ty, src_rc, false); - try strat.write(cg, try dst.tracking(cg).short.mem(cg, .{ - .size = .fromBitSize(part_bit_size), - .disp = part_disp, - }), registerAlias(src_reg, part_size)); - } else { - const frame_size = std.math.ceilPowerOfTwoAssert(u32, part_size); - const 
frame_index = try cg.allocFrameIndex(.init(.{ - .size = frame_size, - .alignment = .fromNonzeroByteUnits(frame_size), - })); - const strat = try cg.moveStrategy(src_ty, src_rc, true); - try strat.write(cg, .{ - .base = .{ .frame = frame_index }, - .mod = .{ .rm = .{ .size = .fromSize(frame_size) } }, - }, registerAlias(src_reg, frame_size)); - var dst_ptr = try cg.tempInit(.usize, dst.tracking(cg).short.address()); - try dst_ptr.toOffset(part_disp, cg); - var src_ptr = try cg.tempInit(.usize, .{ .lea_frame = .{ .index = frame_index } }); - var len = try cg.tempInit(.usize, .{ .immediate = src_abi_size }); - try dst_ptr.memcpy(&src_ptr, &len, cg); - try dst_ptr.die(cg); - try src_ptr.die(cg); - try len.die(cg); - } + const class_index = next_class_index; + const class = classes[class_index]; + next_class_index = @intCast(switch (class) { + .integer, .memory, .float, .float_combine => class_index + 1, + .sse => std.mem.indexOfNonePos(abi.Class, classes, class_index + 1, &.{.sseup}) orelse classes.len, + .x87 => std.mem.indexOfNonePos(abi.Class, classes, class_index + 1, &.{.x87up}) orelse classes.len, + .sseup, .x87up, .complex_x87, .none, .win_i128, .integer_per_element => unreachable, + }); + const part_size = switch (class) { + .integer, .sse, .memory => @min(8 * @as(u7, next_class_index - class_index), remaining_abi_size), + .x87 => 16, + .float => 4, + .float_combine => 8, + .sseup, .x87up, .complex_x87, .none, .win_i128, .integer_per_element => unreachable, + }; + try dst.writeReg(part_disp, switch (class) { + .integer => .u64, + .sse => switch (part_size) { + else => unreachable, + 8 => .f64, + 16 => .vector_2_f64, + 32 => .vector_4_f64, + }, + .x87 => .f80, + .float => .f32, + .float_combine => .vector_2_f32, + .sseup, .x87up, .complex_x87, .memory, .none, .win_i128, .integer_per_element => unreachable, + }, src_reg, cg); part_disp += part_size; - src_abi_size -= part_size; + remaining_abi_size -= part_size; } + assert(next_class_index == classes.len); } fn 
memcpy(dst: *Temp, src: *Temp, len: *Temp, cg: *CodeGen) InnerError!void { @@ -105786,9 +105892,9 @@ const Temp = struct { }; }; -fn resetTemps(cg: *CodeGen) InnerError!void { +fn resetTemps(cg: *CodeGen, from_index: Temp.Index) InnerError!void { var any_valid = false; - for (0..@intFromEnum(cg.next_temp_index)) |temp_index| { + for (@intFromEnum(from_index)..@intFromEnum(cg.next_temp_index)) |temp_index| { const temp: Temp.Index = @enumFromInt(temp_index); if (temp.isValid(cg)) { any_valid = true; @@ -105800,7 +105906,7 @@ fn resetTemps(cg: *CodeGen) InnerError!void { cg.temp_type[temp_index] = undefined; } if (any_valid) return cg.fail("failed to kill all temps", .{}); - cg.next_temp_index = @enumFromInt(0); + cg.next_temp_index = from_index; } fn reuseTemp( @@ -105889,70 +105995,75 @@ fn tempMemFromValue(cg: *CodeGen, value: Value) InnerError!Temp { return cg.tempInit(value.typeOf(cg.pt.zcu), try cg.lowerUav(value)); } -fn tempFromOperand( - cg: *CodeGen, - inst: Air.Inst.Index, - op_index: Liveness.OperandInt, - op_ref: Air.Inst.Ref, - ignore_death: bool, -) InnerError!Temp { +fn tempFromOperand(cg: *CodeGen, op_ref: Air.Inst.Ref, op_dies: bool) InnerError!Temp { const zcu = cg.pt.zcu; const ip = &zcu.intern_pool; - if (ignore_death or !cg.liveness.operandDies(inst, op_index)) { - if (op_ref.toIndex()) |op_inst| return .{ .index = op_inst }; - const val = op_ref.toInterned().?; - const gop = try cg.const_tracking.getOrPut(cg.gpa, val); - if (!gop.found_existing) gop.value_ptr.* = .init(init: { - const const_mcv = try cg.genTypedValue(.fromInterned(val)); - switch (const_mcv) { - .lea_tlv => |tlv_sym| switch (cg.bin_file.tag) { - .elf, .macho => { - if (cg.mod.pic) { - try cg.spillRegisters(&.{ .rdi, .rax }); - } else { - try cg.spillRegisters(&.{.rax}); - } - const frame_index = try cg.allocFrameIndex(.init(.{ - .size = 8, - .alignment = .@"8", - })); - try cg.genSetMem( - .{ .frame = frame_index }, - 0, - .usize, - .{ .lea_symbol = .{ .sym_index = tlv_sym } }, 
- .{}, - ); - break :init .{ .load_frame = .{ .index = frame_index } }; - }, - else => break :init const_mcv, + if (op_dies) { + const temp_index = cg.next_temp_index; + const temp: Temp = .{ .index = temp_index.toIndex() }; + const op_inst = op_ref.toIndex().?; + const tracking = cg.getResolvedInstValue(op_inst); + temp_index.tracking(cg).* = tracking.*; + if (!cg.reuseTemp(temp.index, op_inst, tracking)) return .{ .index = op_ref.toIndex().? }; + cg.temp_type[@intFromEnum(temp_index)] = cg.typeOf(op_ref); + cg.next_temp_index = @enumFromInt(@intFromEnum(temp_index) + 1); + return temp; + } + + if (op_ref.toIndex()) |op_inst| return .{ .index = op_inst }; + const val = op_ref.toInterned().?; + const gop = try cg.const_tracking.getOrPut(cg.gpa, val); + if (!gop.found_existing) gop.value_ptr.* = .init(init: { + const const_mcv = try cg.genTypedValue(.fromInterned(val)); + switch (const_mcv) { + .lea_tlv => |tlv_sym| switch (cg.bin_file.tag) { + .elf, .macho => { + if (cg.mod.pic) { + try cg.spillRegisters(&.{ .rdi, .rax }); + } else { + try cg.spillRegisters(&.{.rax}); + } + const frame_index = try cg.allocFrameIndex(.init(.{ + .size = 8, + .alignment = .@"8", + })); + try cg.genSetMem( + .{ .frame = frame_index }, + 0, + .usize, + .{ .lea_symbol = .{ .sym_index = tlv_sym } }, + .{}, + ); + break :init .{ .load_frame = .{ .index = frame_index } }; }, else => break :init const_mcv, - } - }); - return cg.tempInit(.fromInterned(ip.typeOf(val)), gop.value_ptr.short); - } + }, + else => break :init const_mcv, + } + }); + return cg.tempInit(.fromInterned(ip.typeOf(val)), gop.value_ptr.short); +} - const temp_index = cg.next_temp_index; - const temp: Temp = .{ .index = temp_index.toIndex() }; - const op_inst = op_ref.toIndex().?; - const tracking = cg.getResolvedInstValue(op_inst); - temp_index.tracking(cg).* = tracking.*; - if (!cg.reuseTemp(temp.index, op_inst, tracking)) return .{ .index = op_ref.toIndex().? 
}; - cg.temp_type[@intFromEnum(temp_index)] = cg.typeOf(op_ref); - cg.next_temp_index = @enumFromInt(@intFromEnum(temp_index) + 1); - return temp; +fn tempsFromOperandsInner( + cg: *CodeGen, + inst: Air.Inst.Index, + op_temps: []Temp, + op_refs: []const Air.Inst.Ref, +) InnerError!void { + for (op_temps, 0.., op_refs) |*op_temp, op_index, op_ref| op_temp.* = try cg.tempFromOperand(op_ref, for (op_refs[0..op_index]) |prev_op_ref| { + if (op_ref == prev_op_ref) break false; + } else cg.liveness.operandDies(inst, @intCast(op_index))); } -inline fn tempsFromOperands(cg: *CodeGen, inst: Air.Inst.Index, op_refs: anytype) InnerError![op_refs.len]Temp { - var temps: [op_refs.len]Temp = undefined; - inline for (&temps, 0.., op_refs) |*temp, op_index, op_ref| { - temp.* = try cg.tempFromOperand(inst, op_index, op_ref, inline for (0..op_index) |prev_op_index| { - if (op_ref == op_refs[prev_op_index]) break true; - } else false); - } - return temps; +inline fn tempsFromOperands( + cg: *CodeGen, + inst: Air.Inst.Index, + op_refs: anytype, +) InnerError![op_refs.len]Temp { + var op_temps: [op_refs.len]Temp = undefined; + try cg.tempsFromOperandsInner(inst, &op_temps, &op_refs); + return op_temps; } const Operand = union(enum) { diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index 2ac3402fd8e9..6d11a44c2a42 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -100,7 +100,7 @@ pub const Context = enum { ret, arg, field, other }; /// There are a maximum of 8 possible return slots. Returned values are in /// the beginning of the array; unused slots are filled with .none. 
-pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8]Class { +pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Context) [8]Class { const memory_class = [_]Class{ .memory, .none, .none, .none, .none, .none, .none, .none, @@ -148,7 +148,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8 result[0] = .integer; return result; }, - .float => switch (ty.floatBits(target)) { + .float => switch (ty.floatBits(target.*)) { 16 => { if (ctx == .field) { result[0] = .memory; @@ -330,7 +330,7 @@ fn classifySystemVStruct( starting_byte_offset: u64, loaded_struct: InternPool.LoadedStructType, zcu: *Zcu, - target: std.Target, + target: *const std.Target, ) u64 { const ip = &zcu.intern_pool; var byte_offset = starting_byte_offset; @@ -379,7 +379,7 @@ fn classifySystemVUnion( starting_byte_offset: u64, loaded_union: InternPool.LoadedUnionType, zcu: *Zcu, - target: std.Target, + target: *const std.Target, ) u64 { const ip = &zcu.intern_pool; for (0..loaded_union.field_types.len) |field_index| { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index e82d75311ed4..f8aeface597f 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -100,7 +100,6 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 { .kalimba, .propeller, => unreachable, // Gated by hasLlvmSupport(). - }; try llvm_triple.appendSlice(llvm_arch); @@ -309,467 +308,154 @@ pub fn supportsTailCall(target: std.Target) bool { } } -const DataLayoutBuilder = struct { - target: std.Target, +pub fn dataLayout(target: std.Target) []const u8 { + // These data layouts should match Clang. 
+ return switch (target.cpu.arch) { + .arc => "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-f32:32:32-i64:32-f64:32-a:0:32-n32", + .xcore => "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:32-f64:32-a:0:32-n32", + .hexagon => "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048", + .lanai => "E-m:e-p:32:32-i64:64-a:0:32-n32-S64", + .aarch64 => if (target.ofmt == .macho) + if (target.os.tag == .windows) + "e-m:o-i64:64-i128:128-n32:64-S128-Fn32" + else if (target.abi == .ilp32) + "e-m:o-p:32:32-i64:64-i128:128-n32:64-S128-Fn32" + else + "e-m:o-i64:64-i128:128-n32:64-S128-Fn32" + else if (target.os.tag == .windows) + "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128-Fn32" + else + "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32", + .aarch64_be => "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32", + .arm => if (target.ofmt == .macho) + "e-m:o-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" + else + "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", + .armeb, .thumbeb => if (target.ofmt == .macho) + "E-m:o-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" + else + "E-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", + .thumb => if (target.ofmt == .macho) + "e-m:o-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" + else if (target.os.tag == .windows) + "e-m:w-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" + else + "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", + .avr => "e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8", + .bpfeb => "E-m:e-p:64:64-i64:64-i128:128-n32:64-S128", + .bpfel => "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128", + .msp430 => "e-m:e-p:16:16-i32:16-i64:16-f32:16-f64:16-a:8-n8:16-S16", + .mips => "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64", + .mipsel => "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64", + .mips64 => switch (target.abi) { + .gnuabin32, .muslabin32 => "E-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128", 
+ else => "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128", + }, + .mips64el => switch (target.abi) { + .gnuabin32, .muslabin32 => "e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128", + else => "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128", + }, + .m68k => "E-m:e-p:32:16:32-i8:8:8-i16:16:16-i32:16:32-n8:16:32-a:0:16-S16", + .powerpc => if (target.os.tag == .aix) + "E-m:a-p:32:32-Fi32-i64:64-n32" + else + "E-m:e-p:32:32-Fn32-i64:64-n32", + .powerpcle => "e-m:e-p:32:32-Fn32-i64:64-n32", + .powerpc64 => switch (target.os.tag) { + .aix => "E-m:a-Fi64-i64:64-n32:64-S128-v256:256:256-v512:512:512", + .linux => if (target.abi.isMusl()) + "E-m:e-Fn32-i64:64-n32:64-S128-v256:256:256-v512:512:512" + else + "E-m:e-Fi64-i64:64-n32:64-S128-v256:256:256-v512:512:512", + .ps3 => "E-m:e-p:32:32-Fi64-i64:64-n32:64", + else => if (target.os.tag == .openbsd or + (target.os.tag == .freebsd and target.os.version_range.semver.isAtLeast(.{ .major = 13, .minor = 0, .patch = 0 }) orelse false)) + "E-m:e-Fn32-i64:64-n32:64" + else + "E-m:e-Fi64-i64:64-n32:64", + }, + .powerpc64le => if (target.os.tag == .linux) + "e-m:e-Fn32-i64:64-n32:64-S128-v256:256:256-v512:512:512" + else + "e-m:e-Fn32-i64:64-n32:64", + .nvptx => "e-p:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64", + .nvptx64 => "e-i64:64-i128:128-v16:16-v32:32-n16:32:64", + .amdgcn => "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9", + .riscv32 => if (std.Target.riscv.featureSetHas(target.cpu.features, .e)) + "e-m:e-p:32:32-i64:64-n32-S32" + else + "e-m:e-p:32:32-i64:64-n32-S128", + .riscv64 => if (std.Target.riscv.featureSetHas(target.cpu.features, .e)) + "e-m:e-p:64:64-i64:64-i128:128-n32:64-S64" + else + "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128", + .sparc => "E-m:e-p:32:32-i64:64-f128:64-n32-S64", + .sparc64 => "E-m:e-i64:64-n32:64-S128", + .s390x => if 
(target.os.tag == .zos) + "E-m:l-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64" + else + "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64", + .x86 => switch (target.os.tag) { + .elfiamcu => "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:32-f64:32-f128:32-n8:16:32-a:0:32-S32", + .windows => switch (target.abi) { + .cygnus => "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:32-n8:16:32-a:0:32-S32", + .gnu => if (target.ofmt == .coff) + "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:32-n8:16:32-a:0:32-S32" + else + "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:32-n8:16:32-a:0:32-S32", + else => blk: { + const msvc = switch (target.abi) { + .none, .msvc => true, + else => false, + }; - pub fn format( - self: DataLayoutBuilder, - comptime _: []const u8, - _: std.fmt.FormatOptions, - writer: anytype, - ) @TypeOf(writer).Error!void { - try writer.writeByte(switch (self.target.cpu.arch.endian()) { - .little => 'e', - .big => 'E', - }); - switch (self.target.cpu.arch) { - .amdgcn, - .nvptx, - .nvptx64, - => {}, - .avr => try writer.writeAll("-P1"), - else => try writer.print("-m:{c}", .{@as(u8, switch (self.target.cpu.arch) { - .mips, .mipsel => 'm', // Mips mangling: Private symbols get a $ prefix. - else => switch (self.target.ofmt) { - .elf => 'e', // ELF mangling: Private symbols get a `.L` prefix. - //.goff => 'l', // GOFF mangling: Private symbols get a `@` prefix. - .macho => 'o', // Mach-O mangling: Private symbols get `L` prefix. - // Other symbols get a `_` prefix. - .coff => switch (self.target.os.tag) { - .uefi, .windows => switch (self.target.cpu.arch) { - .x86 => 'x', // Windows x86 COFF mangling: Private symbols get the usual - // prefix. Regular C symbols get a `_` prefix. Functions with `__stdcall`, - //`__fastcall`, and `__vectorcall` have custom mangling that appends `@N` - // where N is the number of bytes used to pass parameters. 
C++ symbols - // starting with `?` are not mangled in any way. - else => 'w', // Windows COFF mangling: Similar to x, except that normal C - // symbols do not receive a `_` prefix. - }, - else => 'e', - }, - //.xcoff => 'a', // XCOFF mangling: Private symbols get a `L..` prefix. - else => 'e', - }, - })}), - } - const stack_abi = self.target.stackAlignment() * 8; - if (self.target.cpu.arch == .csky) try writer.print("-S{d}", .{stack_abi}); - var any_non_integral = false; - const ptr_bit_width = self.target.ptrBitWidth(); - var default_info = struct { size: u16, abi: u16, pref: u16, idx: u16 }{ - .size = 64, - .abi = 64, - .pref = 64, - .idx = 64, - }; - const addr_space_info = llvmAddrSpaceInfo(self.target); - for (addr_space_info, 0..) |info, i| { - assert((info.llvm == .default) == (i == 0)); - if (info.non_integral) { - assert(info.llvm != .default); - any_non_integral = true; - } - const size = info.size orelse ptr_bit_width; - const abi = info.abi orelse ptr_bit_width; - const pref = info.pref orelse abi; - const idx = info.idx orelse size; - const matches_default = - size == default_info.size and - abi == default_info.abi and - pref == default_info.pref and - idx == default_info.idx; - if (info.llvm == .default) default_info = .{ - .size = size, - .abi = abi, - .pref = pref, - .idx = idx, - }; - if (!info.force_in_data_layout and matches_default and - self.target.cpu.arch != .riscv64 and - self.target.cpu.arch != .loongarch64 and - !(self.target.cpu.arch == .aarch64 and - (self.target.os.tag == .uefi or self.target.os.tag == .windows)) and - self.target.cpu.arch != .bpfeb and self.target.cpu.arch != .bpfel) continue; - try writer.writeAll("-p"); - if (info.llvm != .default) try writer.print("{d}", .{@intFromEnum(info.llvm)}); - try writer.print(":{d}:{d}", .{ size, abi }); - if (pref != abi or idx != size or self.target.cpu.arch == .hexagon) { - try writer.print(":{d}", .{pref}); - if (idx != size) try writer.print(":{d}", .{idx}); - } - } - if 
(self.target.cpu.arch.isArm()) - try writer.writeAll("-Fi8") // for thumb interwork - else if (self.target.cpu.arch == .powerpc64 and - self.target.os.tag != .freebsd and - self.target.os.tag != .openbsd and - !self.target.abi.isMusl()) - try writer.writeAll("-Fi64") - else if (self.target.cpu.arch.isPowerPC() and self.target.os.tag == .aix) - try writer.writeAll(if (self.target.cpu.arch.isPowerPC64()) "-Fi64" else "-Fi32") - else if (self.target.cpu.arch.isPowerPC()) - try writer.writeAll("-Fn32"); - if (self.target.cpu.arch != .hexagon) { - if (self.target.cpu.arch == .arc or self.target.cpu.arch == .s390x) - try self.typeAlignment(.integer, 1, 8, 8, false, writer); - try self.typeAlignment(.integer, 8, 8, 8, false, writer); - try self.typeAlignment(.integer, 16, 16, 16, false, writer); - try self.typeAlignment(.integer, 32, 32, 32, false, writer); - if (self.target.cpu.arch == .arc) - try self.typeAlignment(.float, 32, 32, 32, false, writer); - try self.typeAlignment(.integer, 64, 32, 64, false, writer); - try self.typeAlignment(.integer, 128, 32, 64, false, writer); - if (backendSupportsF16(self.target)) - try self.typeAlignment(.float, 16, 16, 16, false, writer); - if (self.target.cpu.arch != .arc) - try self.typeAlignment(.float, 32, 32, 32, false, writer); - try self.typeAlignment(.float, 64, 64, 64, false, writer); - if (self.target.cpu.arch.isX86()) try self.typeAlignment(.float, 80, 0, 0, false, writer); - try self.typeAlignment(.float, 128, 128, 128, false, writer); - } - switch (self.target.cpu.arch) { - .amdgcn => { - try self.typeAlignment(.vector, 16, 16, 16, false, writer); - try self.typeAlignment(.vector, 24, 32, 32, false, writer); - try self.typeAlignment(.vector, 32, 32, 32, false, writer); - try self.typeAlignment(.vector, 48, 64, 64, false, writer); - try self.typeAlignment(.vector, 96, 128, 128, false, writer); - try self.typeAlignment(.vector, 192, 256, 256, false, writer); - try self.typeAlignment(.vector, 256, 256, 256, false, writer); - 
try self.typeAlignment(.vector, 512, 512, 512, false, writer); - try self.typeAlignment(.vector, 1024, 1024, 1024, false, writer); - try self.typeAlignment(.vector, 2048, 2048, 2048, false, writer); - }, - .ve => {}, - else => { - try self.typeAlignment(.vector, 16, 32, 32, false, writer); - try self.typeAlignment(.vector, 32, 32, 32, false, writer); - try self.typeAlignment(.vector, 64, 64, 64, false, writer); - try self.typeAlignment(.vector, 128, 128, 128, true, writer); - }, - } - const swap_agg_nat = switch (self.target.cpu.arch) { - .x86, .x86_64 => switch (self.target.os.tag) { - .uefi, .windows => true, - else => false, - }, - .avr, .m68k => true, - else => false, - }; - if (!swap_agg_nat) try self.typeAlignment(.aggregate, 0, 0, 64, false, writer); - if (self.target.cpu.arch == .csky) try writer.writeAll("-Fi32"); - for (@as([]const u24, switch (self.target.cpu.arch) { - .avr => &.{8}, - .msp430 => &.{ 8, 16 }, - .arc, - .arm, - .armeb, - .csky, - .loongarch32, - .mips, - .mipsel, - .powerpc, - .powerpcle, - .riscv32, - .sparc, - .thumb, - .thumbeb, - .xtensa, - => &.{32}, - .aarch64, - .aarch64_be, - .amdgcn, - .bpfeb, - .bpfel, - .loongarch64, - .mips64, - .mips64el, - .powerpc64, - .powerpc64le, - .riscv64, - .s390x, - .sparc64, - .ve, - .wasm32, - .wasm64, - => &.{ 32, 64 }, - .hexagon => &.{ 16, 32 }, - .m68k, - .x86, - => &.{ 8, 16, 32 }, - .nvptx, - .nvptx64, - => &.{ 16, 32, 64 }, - .x86_64 => &.{ 8, 16, 32, 64 }, - else => &.{}, - }), 0..) 
|natural, index| switch (index) { - 0 => try writer.print("-n{d}", .{natural}), - else => try writer.print(":{d}", .{natural}), - }; - if (swap_agg_nat) try self.typeAlignment(.aggregate, 0, 0, 64, false, writer); - if (self.target.cpu.arch == .hexagon) { - try self.typeAlignment(.integer, 64, 64, 64, true, writer); - try self.typeAlignment(.integer, 32, 32, 32, true, writer); - try self.typeAlignment(.integer, 16, 16, 16, true, writer); - try self.typeAlignment(.integer, 1, 8, 8, true, writer); - try self.typeAlignment(.float, 32, 32, 32, true, writer); - try self.typeAlignment(.float, 64, 64, 64, true, writer); - } - if (stack_abi != ptr_bit_width or self.target.cpu.arch == .msp430 or - self.target.os.tag == .uefi or self.target.os.tag == .windows or - self.target.cpu.arch == .riscv32) - try writer.print("-S{d}", .{stack_abi}); - if (self.target.cpu.arch.isAARCH64()) - try writer.writeAll("-Fn32"); - switch (self.target.cpu.arch) { - .hexagon, .ve => { - try self.typeAlignment(.vector, 32, 128, 128, true, writer); - try self.typeAlignment(.vector, 64, 128, 128, true, writer); - try self.typeAlignment(.vector, 128, 128, 128, true, writer); - }, - else => {}, - } - if (self.target.cpu.arch != .amdgcn) { - try self.typeAlignment(.vector, 256, 128, 128, true, writer); - try self.typeAlignment(.vector, 512, 128, 128, true, writer); - try self.typeAlignment(.vector, 1024, 128, 128, true, writer); - try self.typeAlignment(.vector, 2048, 128, 128, true, writer); - try self.typeAlignment(.vector, 4096, 128, 128, true, writer); - try self.typeAlignment(.vector, 8192, 128, 128, true, writer); - try self.typeAlignment(.vector, 16384, 128, 128, true, writer); - } - const alloca_addr_space = llvmAllocaAddressSpace(self.target); - if (alloca_addr_space != .default) try writer.print("-A{d}", .{@intFromEnum(alloca_addr_space)}); - const global_addr_space = llvmDefaultGlobalAddressSpace(self.target); - if (global_addr_space != .default) try writer.print("-G{d}", 
.{@intFromEnum(global_addr_space)}); - if (any_non_integral) { - try writer.writeAll("-ni"); - for (addr_space_info) |info| if (info.non_integral) - try writer.print(":{d}", .{@intFromEnum(info.llvm)}); - } - } - - fn typeAlignment( - self: DataLayoutBuilder, - kind: enum { integer, vector, float, aggregate }, - size: u24, - default_abi: u24, - default_pref: u24, - default_force_pref: bool, - writer: anytype, - ) @TypeOf(writer).Error!void { - var abi = default_abi; - var pref = default_pref; - var force_abi = false; - var force_pref = default_force_pref; - if (kind == .float and size == 80) { - abi = 128; - pref = 128; - } - for (@as([]const std.Target.CType, switch (kind) { - .integer => &.{ .char, .short, .int, .long, .longlong }, - .float => &.{ .float, .double, .longdouble }, - .vector, .aggregate => &.{}, - })) |cty| { - if (self.target.cTypeBitSize(cty) != size) continue; - abi = self.target.cTypeAlignment(cty) * 8; - pref = self.target.cTypePreferredAlignment(cty) * 8; - break; - } - switch (kind) { - .integer => { - if (self.target.ptrBitWidth() <= 16 and size >= 128) return; - abi = @min(abi, Type.maxIntAlignment(self.target) * 8); - switch (self.target.cpu.arch) { - .aarch64, - .aarch64_be, - => if (size == 128) { - abi = size; - pref = size; - } else switch (self.target.os.tag) { - .macos, .ios, .watchos, .tvos, .visionos => {}, - .uefi, .windows => { - pref = size; - force_abi = size >= 32; - }, - else => pref = @max(pref, 32), - }, - .arc => if (size <= 64) { - abi = @min((std.math.divCeil(u24, size, 8) catch unreachable) * 8, 32); - pref = 32; - force_abi = true; - force_pref = size <= 32; - }, - .bpfeb, - .bpfel, - .nvptx, - .nvptx64, - .riscv64, - => if (size == 128) { - abi = size; - pref = size; - }, - .csky => if (size == 32 or size == 64) { - abi = 32; - pref = 32; - force_abi = true; - force_pref = true; - }, - .hexagon => force_abi = true, - .m68k => if (size <= 32) { - abi = @min(size, 16); - pref = size; - force_abi = true; - force_pref = 
true; - } else if (size == 64) { - abi = 32; - pref = size; - }, - .mips, - .mipsel, - .mips64, - .mips64el, - => pref = @max(pref, 32), - .s390x => pref = @max(pref, 16), - .ve => if (size == 64) { - abi = size; - pref = size; - }, - .xtensa => if (size <= 64) { - pref = @max(size, 32); - abi = size; - force_abi = size == 64; - }, - .x86 => switch (size) { - 128 => { - abi = size; - pref = size; - }, - else => {}, - }, - .x86_64 => switch (size) { - 64, 128 => { - abi = size; - pref = size; - }, - else => {}, - }, - .loongarch64 => switch (size) { - 128 => { - abi = size; - pref = size; - force_abi = true; - }, - else => {}, - }, - else => {}, - } - }, - .vector => if (self.target.cpu.arch.isArm()) { - switch (size) { - 128 => abi = 64, - else => {}, - } - } else if ((self.target.cpu.arch.isPowerPC64() and self.target.os.tag == .linux and - (size == 256 or size == 512)) or - (self.target.cpu.arch.isNvptx() and (size == 16 or size == 32))) - { - force_abi = true; - abi = size; - pref = size; - } else if (self.target.cpu.arch == .amdgcn and size <= 2048) { - force_abi = true; - } else if (self.target.cpu.arch == .csky and (size == 64 or size == 128)) { - abi = 32; - pref = 32; - force_pref = true; - } else if (self.target.cpu.arch == .hexagon and - ((size >= 32 and size <= 64) or (size >= 512 and size <= 2048))) - { - abi = size; - pref = size; - force_pref = true; - } else if (self.target.cpu.arch == .s390x and size == 128) { - abi = 64; - pref = 64; - force_pref = false; - } else if (self.target.cpu.arch == .ve and (size >= 64 and size <= 16384)) { - abi = 64; - pref = 64; - force_abi = true; - force_pref = true; - }, - .float => switch (self.target.cpu.arch) { - .amdgcn => if (size == 128) { - abi = size; - pref = size; - }, - .arc => if (size == 32 or size == 64) { - abi = 32; - pref = 32; - force_abi = true; - force_pref = size == 32; - }, - .avr, .msp430, .sparc64 => if (size != 32 and size != 64) return, - .csky => if (size == 32 or size == 64) { - abi = 32; 
- pref = 32; - force_abi = true; - force_pref = true; - }, - .hexagon => if (size == 32 or size == 64) { - force_abi = true; - }, - .ve, .xtensa => if (size == 64) { - abi = size; - pref = size; - }, - .wasm32, .wasm64 => if (self.target.os.tag == .emscripten and size == 128) { - abi = 64; - pref = 64; - }, - else => {}, - }, - .aggregate => if (self.target.os.tag == .uefi or self.target.os.tag == .windows or - self.target.cpu.arch.isArm()) - { - pref = @min(pref, self.target.ptrBitWidth()); - } else switch (self.target.cpu.arch) { - .arc, .csky => { - abi = 0; - pref = 32; - }, - .hexagon => { - abi = 0; - pref = 0; - }, - .m68k => { - abi = 0; - pref = 16; - }, - .msp430 => { - abi = 8; - pref = 8; - }, - .s390x => { - abi = 8; - pref = 16; + break :blk if (target.ofmt == .coff) + if (msvc) + "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32-a:0:32-S32" + else + "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:32-n8:16:32-a:0:32-S32" + else if (msvc) + "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32-a:0:32-S32" + else + "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:32-n8:16:32-a:0:32-S32"; }, - else => {}, }, - } - if (kind != .vector and self.target.cpu.arch == .avr) { - force_abi = true; - abi = 8; - pref = 8; - } - if (!force_abi and abi == default_abi and pref == default_pref) return; - try writer.print("-{c}", .{@tagName(kind)[0]}); - if (size != 0) try writer.print("{d}", .{size}); - try writer.print(":{d}", .{abi}); - if (pref != abi or force_pref) try writer.print(":{d}", .{pref}); - } -}; + else => if (target.ofmt == .macho) + "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128" + else + "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128", + }, + .x86_64 => if (target.os.tag.isDarwin() or target.ofmt == .macho) + 
"e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" + else switch (target.abi) { + .gnux32, .muslx32 => "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", + else => if (target.os.tag == .windows and target.ofmt == .coff) + "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" + else + "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", + }, + .spirv => "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1", + .spirv32 => "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1", + .spirv64 => "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1", + .wasm32 => if (target.os.tag == .emscripten) + "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-f128:64-n32:64-S128-ni:1:10:20" + else + "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20", + .wasm64 => if (target.os.tag == .emscripten) + "e-m:e-p:64:64-p10:8:8-p20:8:8-i64:64-f128:64-n32:64-S128-ni:1:10:20" + else + "e-m:e-p:64:64-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20", + .ve => "e-m:e-i64:64-n32:64-S128-v64:64:64-v128:64:64-v256:64:64-v512:64:64-v1024:64:64-v2048:64:64-v4096:64:64-v8192:64:64-v16384:64:64", + .csky => "e-m:e-S32-p:32:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:32-v128:32:32-a:0:32-Fi32-n32", + .loongarch32 => "e-m:e-p:32:32-i64:64-n32-S128", + .loongarch64 => "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128", + .xtensa => "e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32", + + .kalimba, + .propeller, + => unreachable, // Gated by hasLlvmSupport(). 
+ }; +} pub const Object = struct { gpa: Allocator, @@ -856,7 +542,7 @@ pub const Object = struct { }); errdefer builder.deinit(); - builder.data_layout = try builder.fmt("{}", .{DataLayoutBuilder{ .target = target }}); + builder.data_layout = try builder.string(dataLayout(target)); const debug_compile_unit, const debug_enums_fwd_ref, const debug_globals_fwd_ref = if (!builder.strip) debug_info: { @@ -12071,7 +11757,7 @@ fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Targe } fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool { - const class = x86_64_abi.classifySystemV(ty, zcu, target, .ret); + const class = x86_64_abi.classifySystemV(ty, zcu, &target, .ret); if (class[0] == .memory) return true; if (class[0] == .x87 and class[2] != .none) return true; return false; @@ -12181,7 +11867,7 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E return o.lowerType(return_type); } const target = zcu.getTarget(); - const classes = x86_64_abi.classifySystemV(return_type, zcu, target, .ret); + const classes = x86_64_abi.classifySystemV(return_type, zcu, &target, .ret); if (classes[0] == .memory) return .void; var types_index: u32 = 0; var types_buffer: [8]Builder.Type = undefined; @@ -12459,7 +12145,7 @@ const ParamTypeIterator = struct { const zcu = it.object.pt.zcu; const ip = &zcu.intern_pool; const target = zcu.getTarget(); - const classes = x86_64_abi.classifySystemV(ty, zcu, target, .arg); + const classes = x86_64_abi.classifySystemV(ty, zcu, &target, .arg); if (classes[0] == .memory) { it.zig_index += 1; it.llvm_index += 1; diff --git a/src/main.zig b/src/main.zig index 1075993846b8..3d6e96b91eea 100644 --- a/src/main.zig +++ b/src/main.zig @@ -39,7 +39,7 @@ test { _ = Package; } -const thread_stack_size = 32 << 20; +const thread_stack_size = 50 << 20; pub const std_options: std.Options = .{ .wasiCwd = wasi_cwd, diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index 
f2fa2f4b0a46..4b11f61f1926 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -132,13 +132,20 @@ test "cmp f16" { try comptime testCmp(f16); } -test "cmp f32/f64" { +test "cmp f32" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; try testCmp(f32); try comptime testCmp(f32); +} + +test "cmp f64" { + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 + try testCmp(f64); try comptime testCmp(f64); } @@ -224,6 +231,98 @@ fn testCmp(comptime T: type) !void { } } +test "vector cmp f16" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.cpu.arch.isArm()) return error.SkipZigTest; + if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest; + + try testCmpVector(f16); + try comptime testCmpVector(f16); +} + +test "vector cmp f32" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; 
// TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.cpu.arch.isArm()) return error.SkipZigTest; + if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest; + + try testCmpVector(f32); + try comptime testCmpVector(f32); +} + +test "vector cmp f64" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.cpu.arch.isArm()) return error.SkipZigTest; + if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest; + + try testCmpVector(f64); + try comptime testCmpVector(f64); +} + +test "vector cmp f128" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.cpu.arch.isArm()) return error.SkipZigTest; + if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest; + + try testCmpVector(f128); + try comptime testCmpVector(f128); +} + +test "vector cmp f80/c_longdouble" { + if (true) return error.SkipZigTest; + + try testCmpVector(f80); + try comptime testCmpVector(f80); + try testCmpVector(c_longdouble); + try comptime testCmpVector(c_longdouble); +} +fn testCmpVector(comptime T: type) !void { + var edges = [_]T{ + -math.inf(T), + -math.floatMax(T), + -math.floatMin(T), + -math.floatTrueMin(T), + -0.0, + 
math.nan(T), + 0.0, + math.floatTrueMin(T), + math.floatMin(T), + math.floatMax(T), + math.inf(T), + }; + _ = &edges; + for (edges, 0..) |rhs, rhs_i| { + const rhs_v: @Vector(4, T) = .{ rhs, rhs, rhs, rhs }; + for (edges, 0..) |lhs, lhs_i| { + const no_nan = lhs_i != 5 and rhs_i != 5; + const lhs_order = if (lhs_i < 5) lhs_i else lhs_i - 2; + const rhs_order = if (rhs_i < 5) rhs_i else rhs_i - 2; + const lhs_v: @Vector(4, T) = .{ lhs, lhs, lhs, lhs }; + try expect(@reduce(.And, (lhs_v == rhs_v)) == (no_nan and lhs_order == rhs_order)); + try expect(@reduce(.And, (lhs_v != rhs_v)) == !(no_nan and lhs_order == rhs_order)); + try expect(@reduce(.And, (lhs_v < rhs_v)) == (no_nan and lhs_order < rhs_order)); + try expect(@reduce(.And, (lhs_v > rhs_v)) == (no_nan and lhs_order > rhs_order)); + try expect(@reduce(.And, (lhs_v <= rhs_v)) == (no_nan and lhs_order <= rhs_order)); + try expect(@reduce(.And, (lhs_v >= rhs_v)) == (no_nan and lhs_order >= rhs_order)); + } + } +} + test "different sized float comparisons" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO @@ -1703,3 +1802,33 @@ test "optimized float mode" { try expect(S.optimized(small) == small); try expect(S.strict(small) == tiny); } + +fn MakeType(comptime x: anytype) type { + return struct { + fn get() @TypeOf(x) { + return x; + } + }; +} + +const nan_a: f32 = @bitCast(@as(u32, 0xffc00000)); +const nan_b: f32 = @bitCast(@as(u32, 0xffe00000)); + +fn testMemoization() !void { + try expect(MakeType(nan_a) == MakeType(nan_a)); + try expect(MakeType(nan_b) == MakeType(nan_b)); + try expect(MakeType(nan_a) != MakeType(nan_b)); +} + +fn testVectorMemoization(comptime T: type) !void { + const nan_a_v: T = @splat(nan_a); + const nan_b_v: T = @splat(nan_b); + try expect(MakeType(nan_a_v) == MakeType(nan_a_v)); + try expect(MakeType(nan_b_v) == MakeType(nan_b_v)); + try expect(MakeType(nan_a_v) != MakeType(nan_b_v)); 
+} + +test "comptime calls are only memoized when float arguments are bit-for-bit equal" { + try comptime testMemoization(); + try comptime testVectorMemoization(@Vector(4, f32)); +} diff --git a/test/cases/compile_errors/union_field_ordered_differently_than_enum.zig b/test/cases/compile_errors/union_field_ordered_differently_than_enum.zig new file mode 100644 index 000000000000..5c86fb4080cd --- /dev/null +++ b/test/cases/compile_errors/union_field_ordered_differently_than_enum.zig @@ -0,0 +1,27 @@ +const Tag = enum { a, b }; + +const Union = union(Tag) { + b, + a, +}; + +const BaseUnion = union(enum) { + a, + b, +}; + +const GeneratedTagUnion = union(@typeInfo(BaseUnion).@"union".tag_type.?) { + b, + a, +}; + +export fn entry() usize { + return @sizeOf(Union) + @sizeOf(GeneratedTagUnion); +} + +// error +// +// :4:5: error: union field 'b' ordered differently than corresponding enum field +// :1:23: note: enum field here +// :14:5: error: union field 'b' ordered differently than corresponding enum field +// :10:5: note: enum field here diff --git a/test/link/elf.zig b/test/link/elf.zig index 1afe1777d393..92b857a565c5 100644 --- a/test/link/elf.zig +++ b/test/link/elf.zig @@ -114,7 +114,8 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step { elf_step.dependOn(testLargeBss(b, .{ .target = gnu_target })); elf_step.dependOn(testLinkOrder(b, .{ .target = gnu_target })); elf_step.dependOn(testLdScript(b, .{ .target = gnu_target })); - elf_step.dependOn(testLdScriptPathError(b, .{ .target = gnu_target })); + // https://github.com/ziglang/zig/issues/23125 + // elf_step.dependOn(testLdScriptPathError(b, .{ .target = gnu_target })); elf_step.dependOn(testLdScriptAllowUndefinedVersion(b, .{ .target = gnu_target, .use_lld = true })); elf_step.dependOn(testLdScriptDisallowUndefinedVersion(b, .{ .target = gnu_target, .use_lld = true })); // https://github.com/ziglang/zig/issues/17451