
0.2.0 Release Notes

Download & Documentation

This is a huge release, containing several high-impact features such as unions, inferred error sets, and coroutines, and including changes from 16 different contributors.

Special thanks to my sponsors who provide financial support. You're making Zig sustainable.

Unions & Enums

Zig now supports unions. They come in a few flavors: bare unions, tagged unions declared with union(enum) or union(TagType), and extern unions for compatibility with C.

Enums are no longer tagged unions; they are simple enumerations. They also allow you to specify the integer tag type with enum(IntType). And finally, enum fields can specify the integer tag value, so enums no longer have to start at 0 and count upwards. Enum types which have explicit integer tag types are allowed inside packed structs.
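For example (Color and Pixel are made-up names for illustration), the new enum features look like this:

// explicit u8 tag type with explicit tag values
const Color = enum(u8) {
    Red = 1,
    Green = 2,
    Blue = 4,
};

// because Color has an explicit integer tag type, it is allowed inside a packed struct
const Pixel = packed struct {
    color: Color,
    alpha: u8,
};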

C translation now understands C unions and translates them to extern union. It also understands the GNU extension to specify the integer tag type of enums and translates it accordingly.
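For instance, a C union such as union Value { int i; float f; } would come out of translate-c roughly like this (a sketch; the exact generated declaration name is an assumption):

// rough sketch of translate-c output for: union Value { int i; float f; };
pub const union_Value = extern union {
    i: c_int,
    f: f32,
};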

const TheTag = enum {A, B, C};
const TheUnion = union(TheTag) { A: i32, B: i32, C: i32 };
test "union field access gives the enum values" {
    assert(TheUnion.A == TheTag.A);
    assert(TheUnion.B == TheTag.B);
    assert(TheUnion.C == TheTag.C);
}

If you want to auto-create an enum for a union, you can use the enum keyword like this:

const TheUnion2 = union(enum) {
    Item1,
    Item2: i32,
};

You can switch on a union-enum just like you could previously with an enum:

const SwitchProngWithVarEnum = union(enum) {
    One: i32,
    Two: f32,
    Meh: void,
};
fn switchProngWithVarFn(a: &const SwitchProngWithVarEnum) {
    switch(*a) {
        SwitchProngWithVarEnum.One => |x| {
            assert(x == 13);
        },
        SwitchProngWithVarEnum.Two => |x| {
            assert(x == 13.0);
        },
        SwitchProngWithVarEnum.Meh => |x| {
            const v: void = x;
        },
    }
}

However, if you do not give an enum to a union, the tag value is not visible to the programmer:

const Payload = union {
    A: i32,
    B: f64,
    C: bool,
};
export fn entry() {
    const a = Payload { .A = 1234 };
    foo(a);
}
fn foo(a: &const Payload) {
    switch (*a) {
        Payload.A => {},
        else => unreachable,
    }
}
This gives the following compile errors:

test.zig:11:13: error: switch on union which has no attached enum
    switch (*a) {
            ^
test.zig:1:17: note: consider 'union(enum)' here
const Payload = union {
                ^
test.zig:12:16: error: container 'Payload' has no member called 'A'
        Payload.A => {},
               ^

There is still debug safety though!

const Foo = union {
    float: f32,
    int: u32,
};

pub fn main() void {
    var f = Foo { .int = 42 };
    bar(&f);
}

fn bar(f: &Foo) {
    f.float = 12.34;
}
Running this program triggers the safety check:

access of inactive union field
lib/zig/std/special/panic.zig:12:35: 0x0000000000203674 in ??? (test)
        @import("std").debug.panic("{}", msg);
                                  ^
test.zig:12:6: 0x0000000000217bd7 in ??? (test)
    f.float = 12.34;
     ^
test.zig:8:8: 0x0000000000217b7c in ??? (test)
    bar(&f);
       ^
Aborted

However, if you make an extern union to be compatible with C code, there is no debug safety, just like a C union.
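As a made-up illustration of that, reading a field other than the one last written is simply allowed in an extern union, just like in C:

const Bits = extern union {
    f: f32,
    u: u32,
};

test "extern union has no inactive-field check" {
    var b = Bits { .u = 0x3f800000 };
    // reading the float view of the same bytes is allowed; no safety panic here
    const f = b.f;
    _ = f;
}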

Other tidbits:

test "cast tag type of union to union" {
    var x: Value2 = Letter2.B;
    assert(Letter2(x) == Letter2.B);
}
const Letter2 = enum { A, B, C };
const Value2 = union(Letter2) { A: i32, B, C, };

test "implicit cast union to its tag type" {
    var x: Value2 = Letter2.B;
    assert(x == Letter2.B);
    giveMeLetterB(x);
}
fn giveMeLetterB(x: Letter2) {
    assert(x == Value2.B);
}

Labeled loops, blocks, break, and continue, and R.I.P. goto

We used to have labels and goto like this:

export fn entry() {
    label:
    goto label;
}

Now this does not work, because goto is gone.

test.zig:2:10: error: expected token ';', found ':'
    label:
         ^

There are a few reasons to use goto, but all of the use cases are better served with other zig control flow features:

goto backward

export fn entry() {
    start_over:

    while (some_condition) {
        // do something...
        goto start_over;
    }
}

Instead, use a loop!

export fn entry() {
    outer: while (true) {

        while (some_condition) {
            // do something...
            continue :outer;
        }

        break;
    }
}

goto forward

pub fn findSection(elf: &Elf, name: []const u8) !?&SectionHeader {
    var file_stream = io.FileInStream.init(elf.in_file);
    const in = &file_stream.stream;

    section_loop: for (elf.section_headers) |*elf_section| {
        if (elf_section.sh_type == SHT_NULL) continue;

        const name_offset = elf.string_section.offset + elf_section.name;
        try elf.in_file.seekTo(name_offset);

        for (name) |expected_c| {
            const target_c = try in.readByte();
            if (target_c == 0 or expected_c != target_c) goto next_section;
        }

        {
            const null_byte = try in.readByte();
            if (null_byte == 0) return elf_section;
        }
next_section:
    }

    return null;
}

It turns out the use case is really continuing an outer loop:

pub fn findSection(elf: &Elf, name: []const u8) !?&SectionHeader {
    var file_stream = io.FileInStream.init(elf.in_file);
    const in = &file_stream.stream;

    section_loop: for (elf.section_headers) |*elf_section| {
        if (elf_section.sh_type == SHT_NULL) continue;

        const name_offset = elf.string_section.offset + elf_section.name;
        try elf.in_file.seekTo(name_offset);

        for (name) |expected_c| {
            const target_c = try in.readByte();
            if (target_c == 0 or expected_c != target_c) continue :section_loop;
        }

        {
            const null_byte = try in.readByte();
            if (null_byte == 0) return elf_section;
        }
    }

    return null;
}

You can also break out of arbitrary blocks:

export fn entry() {
    outer: {

        while (some_condition) {
            // do something...
            break :outer;
        }
    }
}

This can be used to return a value from a block in the same way you can return a value from a function:

export fn entry() {
    const value = init: {
        for (slice) |item| {
            if (item > 100)
                break :init item;
        }
        break :init 0;
    };
}

Omitting a semicolon no longer causes the value to be returned by the block. Instead you must use explicit block labels to return a value from a block. I'm considering a keyword such as result which defaults to the current block.

See #346, #630, and #629.

Error Syntax Cleanup

One of the biggest complaints newcomers to Zig had was about its sigils regarding error handling. Given this, I made an effort to choose friendlier syntax.

After these changes, there is a strong pattern that only keywords can modify control flow. For example, we have and and or instead of && and ||. There is one remaining exception: a ?? b. Maybe that's okay, since C# set a precedent.
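As a reminder of what a ?? b does (a minimal sketch, not from the release notes), it unwraps a nullable value, falling back to a default when the value is null:

const assert = @import("std").debug.assert;

test "a ?? b unwraps a nullable with a default" {
    const missing: ?i32 = null;
    assert((missing ?? 42) == 42);

    const present: ?i32 = 1234;
    assert((present ?? 42) == 1234);
}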

Error Return Traces

I'm really excited about this one. I invented a new kind of debugging tool and integrated it into Debug and ReleaseSafe builds.

One of the concerns with removing the %% prefix operator was that it was just so gosh darn convenient to get a stack trace right at the moment where you asserted that a value did not have an error. I wanted to make it so that programmers could use try everywhere and still get the debuggability benefit when an error occurred.

Watch this:

const std = @import("std");

pub fn main() !void {
    const allocator = std.debug.global_allocator;

    const args = try std.os.argsAlloc(allocator);
    defer std.os.argsFree(allocator, args);

    const count = try parseFile(allocator, args[1]);

    if (count < 10) return error.NotEnoughItems;
}

fn parseFile(allocator: &std.mem.Allocator, file_path: []const u8) !usize {
    const contents = std.io.readFileAlloc(allocator, file_path) catch return error.UnableToReadFile;
    defer allocator.free(contents);

    return contents.len;
}

Here's a simple program with a bunch of different ways that errors could get returned from main. In our test example, we're going to open a bogus file that does not exist.

$ zig build-exe test2.zig
$ ./test2 bogus-does-not-exist.txt
error: UnableToReadFile
/home/andy/dev/zig/build/lib/zig/std/os/index.zig:301:33: 0x000000000021acd0 in ??? (test2)
                posix.ENOENT => return PosixOpenError.PathNotFound,
                                ^
/home/andy/dev/zig/build/lib/zig/std/os/file.zig:25:24: 0x00000000002096f6 in ??? (test2)
            const fd = try os.posixOpen(allocator, path, flags, 0);
                       ^
/home/andy/dev/zig/build/lib/zig/std/io.zig:267:16: 0x000000000021ebec in ??? (test2)
    var file = try File.openRead(allocator, path);
               ^
/home/andy/dev/zig/build/test2.zig:15:71: 0x000000000021ce72 in ??? (test2)
    const contents = std.io.readFileAlloc(allocator, file_path) catch return error.UnableToReadFile;
                                                                      ^
/home/andy/dev/zig/build/test2.zig:9:19: 0x000000000021c1f9 in ??? (test2)
    const count = try parseFile(allocator, args[1]);
                  ^

This is not a stack trace snapshot from when an error was "created". This is a return trace of all the points in the code where an error was returned from a function.

Note that if the trace only told us the origin of the error we ultimately received - UnableToReadFile - we would only see the bottom 2 items. Instead, we get the full history of the error's propagation, right up to the fact that we received ENOENT from open.

With this in place, programmers can comfortably use try everywhere, safe in the knowledge that it will be straightforward to troubleshoot the origin of any error bubbling up through the system.

I hope you're skeptically wondering, OK, what's the tradeoff in terms of binary size, performance, and memory?

First of all, this feature is disabled in ReleaseFast mode. So the answer is, literally no cost, in this case. But what about Debug and ReleaseSafe builds?

To analyze performance cost, there are two cases:

For the case when no errors are returned, the cost is a single memory write operation, only in the first non-failable function in the call graph that calls a failable function, i.e. when a function returning void calls a function returning error. This is to initialize this struct in the stack memory:

pub const StackTrace = struct {
    index: usize,
    instruction_addresses: [N]usize,
};

Here, N is the maximum function call depth as determined by call graph analysis. Recursion is ignored and counts for 2.

A pointer to StackTrace is passed as a secret parameter to every function that can return an error, but it's always the first parameter, so it can likely sit in a register and stay there.

That's it for the path when no errors occur. It's practically free in terms of performance.

When generating the code for a function that returns an error, just before the return statement (only for the return statements that return errors), Zig generates a call to this function:

noinline fn __zig_return_error(stack_trace: &StackTrace) void {
    stack_trace.instruction_addresses[stack_trace.index] = @returnAddress();
    stack_trace.index = (stack_trace.index + 1) % N;
}

The cost is 2 math operations plus some memory reads and writes. The memory accessed is constrained and should remain cached for the duration of the error return bubbling.

As for code size cost, 1 function call before a return statement is no big deal. Even so, I have a plan to make the call to __zig_return_error a tail call, which brings the code size cost down to actually zero. What is a return statement in code without error return tracing can become a jump instruction in code with error return tracing.

There are a few ways to activate this error return tracing feature:

Related issues: #651 #684

Error Sets

An error union that could hold any error in the entire program can be written as error!T, which matches the previous behavior. But the new best practice is to use error sets.

// error union looks like this
// this could be any error in the entire program
const x: error!i32 = 1234;

// declare an error set
const MyErrSet = error {OutOfMemory, FileNotFound};

// error union with an error set
const y: MyErrSet!i32 = 5678;
const z1: MyErrSet!i32 = MyErrSet.OutOfMemory;

// a single-error set can be created on the fly:
const z2: MyErrSet!i32 = error{OutOfMemory}.OutOfMemory;
// and there is a shortcut for that:
const z3: MyErrSet!i32 = error.OutOfMemory;

// leave off the error set in a function return type to
// have it infer the error set
fn foo() !i32 {
    // this implicitly declares and returns the ItFailed error
    return error.ItFailed;
}

// merge error sets
const ErrSetA = error{
    /// ErrSetA doc comment
    BadValue,
    Accident,
};
const ErrSetB = error{
    /// ErrSetB doc comment
    BadValue,
    Broken,
};
// doc comment of MergedErrSet.BadValue is "ErrSetA doc comment"
// MergedErrSet contains {BadValue, Accident, Broken}
const MergedErrSet = ErrSetA || ErrSetB;

Most functions in Zig can use error set inference:

fn foo(x: i32) !i32 {
    return std.math.add(i32, x, 1);
}

Switching on the possible errors from this function gives:

const std = @import("std");

test "inferred error set" {
    foo(1234) catch |e| switch (e) {

    };
}

fn foo(x: i32) !bool {
    const y = try std.math.add(i32, x, 1);
    return y == 10;
}

Compile errors:

test.zig:4:25: error: error.Overflow not handled in switch
    foo(1234) catch |e| switch (e) {
                        ^

Coroutines

In this release, Zig gains new keywords for coroutine support. However, the feature is so brand-spanking new that there is not yet any documentation or non-trivial usage examples. I plan to make a blog post about this soon.

Documentation

All the outdated docs are fixed, and we have an automatic docgen tool which:

The tool is, of course, written in Zig. #465

In addition to the above, the following improvements were made to the documentation:

There is still much more to document before we have achieved basic documentation for everything.

Translating C Code

parse-c has been renamed to translate-c.

Performance is improved. Previously we did linear search to find existing global declarations; now we index using a hash map.

Building the tetris example went from 5.3 seconds to 0.76 seconds.

In addition the following changes were made:

Self-Hosted Compiler Progress

The self-hosted compiler effort has begun.

So far we have a tokenizer and an incomplete parser and formatter. The code uses no recursion and therefore has compile-time known stack space usage. See #157.

The self-hosted compiler works on every supported platform, is built using the zig build system, tested with zig test, links against LLVM, and can import 100% of the LLVM symbols from the LLVM C-API .h files - even the inline functions.

There is one C++ file in Zig which uses the more powerful LLVM C++ API (for example to create debug information) and exposes a C API. This file is now shared between the C++ compiler and the self-hosted compiler. In stage1, we create a static library with this one file in it, and then use that library in both the C++ compiler and the self-hosted compiler.

The self-hosted tokenizer and parser have no external dependencies, and are therefore included as part of the standard library.

Zig Build System

Many improvements are planned but were not the focus of 0.2.0.

Standard Library API Changes

Higher level arg-parsing API

It's really a shame that Windows command line parsing requires you to allocate memory. This means that a cross-platform API for command line arguments has to handle possible allocation failure, even though on POSIX it can never fail. This led to a command line args API like this:

pub fn main() !void {
    var arg_it = os.args();
    // skip my own exe name
    _ = arg_it.skip();
    while (arg_it.next(allocator)) |err_or_arg| {
        const arg = try err_or_arg;
        defer allocator.free(arg);
        // use the arg...
    }
}

Yikes, a bit cumbersome. I added a higher level API. Now you can call std.os.argsAlloc and get an error{OutOfMemory}![]const []u8, and you just have to call std.os.argsFree when you're done with it.

pub fn main() !void {
    const allocator = std.heap.c_allocator;

    const args = try os.argsAlloc(allocator);
    defer os.argsFree(allocator, args);

    var arg_i: usize = 1;
    while (arg_i < args.len) : (arg_i += 1) {
        const arg = args[arg_i];
        // do something with arg...
    }
}

Better! Single point of failure.

For now this uses the other API under the hood, but it could be reimplemented with the same API to do a single allocation.

I added a new kind of test to make sure command line argument parsing works.

std.sort

Hejsil pointed out that the quicksort implementation in the standard library failed a simple test case.

There was another problem with the implementation of sort in the standard library, which is that it used O(n) stack space via recursion. This is fundamentally insecure, especially if you consider that the length of an array you might want to sort could be user input. It prevents #157 from working as well.

I had a look at Wikipedia's Comparison of Sorting Algorithms, and only one sorting algorithm checked all the boxes: Block sort.

I found a high quality implementation of block sort in C, which is released into the public domain.

I ported the code from C to Zig, integrated it into the standard library, and it passed all tests first try. Amazing.

Surely, I thought, there must be some edge case. So I created a simple fuzz tester:

test "sort fuzz testing" {
    var rng = std.rand.Rand.init(0x12345678);
    const test_case_count = 10;
    var i: usize = 0;
    while (i < test_case_count) : (i += 1) {
        try fuzzTest(&rng);
    }
}

var fixed_buffer_mem: [100 * 1024]u8 = undefined;

fn fuzzTest(rng: &std.rand.Rand) !void {
    const array_size = rng.range(usize, 0, 1000);
    var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
    var array = try fixed_allocator.allocator.alloc(IdAndValue, array_size);
    // populate with random data
    for (array) |*item, index| {
        item.id = index;
        item.value = rng.range(i32, 0, 100);
    }
    sort(IdAndValue, array, cmpByValue);

    var index: usize = 1;
    while (index < array.len) : (index += 1) {
        if (array[index].value == array[index - 1].value) {
            assert(array[index].id > array[index - 1].id);
        } else {
            assert(array[index].value > array[index - 1].value);
        }
    }
}

This test passed as well. And so I think this problem is solved.
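For context, the fuzz test above relies on a few definitions that are not shown. Here is a hedged sketch of what they might look like (IdAndValue and cmpByValue are reconstructed for illustration; the exact comparator signature expected by sort may differ):

const std = @import("std");
const mem = std.mem;
const sort = std.sort.sort;

// each element remembers its original index so the test can check that the sort is stable
const IdAndValue = struct {
    id: usize,
    value: i32,
};

// order elements by value only (ties are left to the sort's stability)
fn cmpByValue(a: &const IdAndValue, b: &const IdAndValue) bool {
    return a.value < b.value;
}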

Crypto Additions

Integer Rotation Functions

/// Rotates right. Only unsigned values can be rotated.
/// Negative shift values result in a shift modulo the bit count.
pub fn rotr(comptime T: type, x: T, r: var) T {
test "math.rotr" {
    assert(rotr(u8, 0b00000001, usize(0))  == 0b00000001);
    assert(rotr(u8, 0b00000001, usize(9))  == 0b10000000);
    assert(rotr(u8, 0b00000001, usize(8))  == 0b00000001);
    assert(rotr(u8, 0b00000001, usize(4))  == 0b00010000);
    assert(rotr(u8, 0b00000001, isize(-1)) == 0b00000010);
}
/// Rotates left. Only unsigned values can be rotated.
/// Negative shift values result in a shift modulo the bit count.
pub fn rotl(comptime T: type, x: T, r: var) T {
test "math.rotl" {
    assert(rotl(u8, 0b00000001, usize(0))  == 0b00000001);
    assert(rotl(u8, 0b00000001, usize(9))  == 0b00000010);
    assert(rotl(u8, 0b00000001, usize(8))  == 0b00000001);
    assert(rotl(u8, 0b00000001, usize(4))  == 0b00010000);
    assert(rotl(u8, 0b00000001, isize(-1)) == 0b10000000);
}

MD5 and SHA1 Hash Functions

Marc writes:

Some performance comparisons to C.

We take the fastest time measurement across multiple runs.

The block hashing functions use the same md5/sha1 methods.

Cpu: Intel(R) Core(TM) i5-6500 CPU @ 3.20GHz
Gcc: 7.2.1 20171224
Clang: 5.0.1
Zig: 0.1.1.304f6f1d

See https://www.nayuki.io/page/fast-md5-hash-implementation-in-x86-assembly:

gcc -O2
    661 Mb/s
clang -O2
    490 Mb/s
zig --release-fast and zig --release-safe
    570 Mb/s
zig
    50 Mb/s

See https://www.nayuki.io/page/fast-sha1-hash-implementation-in-x86-assembly:

gcc -O2
    588 Mb/s
clang -O2
    563 Mb/s
zig --release-fast and zig --release-safe
    610 Mb/s
zig
    21 Mb/s

In short, Zig provides pretty useful tools for writing this sort of code. We beat clang (which uses the same LLVM backend), and are slower only than GCC on MD5.
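As a usage illustration of the newly added hash functions (not from the release notes; the exact std.crypto API, including the Md5 name, digest_size constant, and one-shot hash function, is an assumption on my part):

const std = @import("std");
const Md5 = std.crypto.Md5;

test "one-shot md5 of a buffer" {
    // assumption: digest_size is 16 and hash(input, out) fills out with the digest
    var out: [Md5.digest_size]u8 = undefined;
    Md5.hash("The quick brown fox jumps over the lazy dog", out[0..]);
}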

SHA-2 Functions

Marc writes:

We take the fastest time measurement across multiple runs. Each implementation was tested across multiple compiler flags and the best result chosen.

Cpu: Intel(R) Core(TM) i5-6500 CPU @ 3.20GHz
Gcc: 7.2.1 20171224
Clang: 5.0.1
Zig: 0.1.1.304f6f1d

See https://www.nayuki.io/page/fast-sha2-hashes-in-x86-assembly.

-- Sha256

Gcc -O2
    219 Mb/s
Clang -O2
    213 Mb/s
Zig --release-fast
    284 Mb/s
Zig --release-safe
    211 Mb/s
Zig
    6 Mb/s
-- Sha512

Gcc -O2
    350 Mb/s
Clang -O2
    354 Mb/s
Zig --release-fast
    426 Mb/s
Zig --release-safe
    300 Mb/s
Zig
    11 Mb/s

Blake2 Hash Functions

Marc writes:

Blake performance numbers for reference:

Cpu: Intel(R) Core(TM) i5-6500 CPU @ 3.20GHz

-- Blake2s

Zig --release-fast
    485 Mb/s
Zig --release-safe
    377 Mb/s
Zig
    11 Mb/s

-- Blake2b

Zig --release-fast
    616 Mb/s
Zig --release-safe
    573 Mb/s
Zig
    18 Mb/s

Sha3 Hashing Functions

Marc writes:

Initially we had a comptime bug which did not allow us to unroll the inner Sha3 functions. Once this was fixed we saw a large, nearly 3x speed boost.

Cpu: Intel(R) Core(TM) i5-6500 CPU @ 3.20GHz

-- Sha3-256

Zig --release-fast
    265 Mb/s
Zig --release-safe
    249 Mb/s
Zig
    10 Mb/s

-- Sha3-512

Zig --release-fast
    144 Mb/s
Zig --release-safe
    143 Mb/s
Zig
    5 Mb/s

Introduce std.heap.ArenaAllocator and std.heap.DirectAllocator

The self-hosted parser is updated to take advantage of ArenaAllocator for the AST that it returns. This significantly reduces the complexity of cleanup code.

docgen and build runner are updated to use the combination of ArenaAllocator and DirectAllocator instead of IncrementingAllocator, which is now deprecated in favor of FixedBufferAllocator combined with DirectAllocator.
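To illustrate the ArenaAllocator and DirectAllocator combination (a minimal sketch, assuming the init/deinit and .allocator field pattern these allocators use in later releases):

const std = @import("std");

pub fn main() !void {
    // DirectAllocator asks the OS for memory; ArenaAllocator sits on top of it
    var direct = std.heap.DirectAllocator.init();
    defer direct.deinit();

    var arena = std.heap.ArenaAllocator.init(&direct.allocator);
    // a single deinit frees everything allocated from the arena
    defer arena.deinit();

    const allocator = &arena.allocator;
    const buf = try allocator.alloc(u8, 1024);
    // no individual free needed; arena.deinit() cleans it all up
    _ = buf;
}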

Added asserts in Allocator to ensure that implementors of the interface return slices of the correct size.

Fixed a bug in Allocator when you call realloc to grow the allocation.

Removed std.heap.IncrementingAllocator. Use std.heap.FixedBufferAllocator combined with std.heap.DirectAllocator instead.

@export

There is now an @export builtin function which can be used in a comptime block to conditionally export a function:

const builtin = @import("builtin");

comptime {
    const strong_linkage = builtin.GlobalLinkage.Strong;
    if (builtin.link_libc) {
        @export("main", main, strong_linkage);
    } else if (builtin.os == builtin.Os.windows) {
        @export("WinMainCRTStartup", WinMainCRTStartup, strong_linkage);
    } else {
        @export("_start", _start, strong_linkage);
    }
}

It can also be used to create aliases:

const builtin = @import("builtin");
const is_test = builtin.is_test;

comptime {
    const linkage = if (is_test) builtin.GlobalLinkage.Internal else builtin.GlobalLinkage.Weak;
    const strong_linkage = if (is_test) builtin.GlobalLinkage.Internal else builtin.GlobalLinkage.Strong;

    @export("__letf2", @import("comparetf2.zig").__letf2, linkage);
    @export("__getf2", @import("comparetf2.zig").__getf2, linkage);

    if (!is_test) {
        // only create these aliases when not testing
        @export("__cmptf2", @import("comparetf2.zig").__letf2, linkage);
        @export("__eqtf2", @import("comparetf2.zig").__letf2, linkage);
        @export("__lttf2", @import("comparetf2.zig").__letf2, linkage);
        @export("__netf2", @import("comparetf2.zig").__letf2, linkage);
        @export("__gttf2", @import("comparetf2.zig").__getf2, linkage);
    }
}

Previous export syntax is still allowed. See #462 and #420 blaze it.

@atomicRmw

The @atomicRmw function can be used to do an atomic read-modify-write.
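For example (a hedged sketch; the argument order and the builtin.AtomicRmwOp / builtin.AtomicOrder enums are assumed to match later versions of the language):

const builtin = @import("builtin");
const assert = @import("std").debug.assert;

test "@atomicRmw does an atomic read-modify-write" {
    var counter: u32 = 0;
    // atomically add 1 to counter; the previous value is returned
    const prev = @atomicRmw(u32, &counter, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
    assert(prev == 0);
    assert(counter == 1);
}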

New IR pass iteration strategy

Before:

while (cond) {
    if (false) { }
    break;
}

Pretty crazy, right? Something as simple as this would crash the compiler.

Now, this code compiles without crashing the compiler.

This improvement deletes a lot of messy code:

 5 files changed, 288 insertions(+), 1243 deletions(-) 

It also fixes comptime branches sometimes not being respected:

export fn entry() {
    while (false) {
        @compileError("bad");
    }
}

Before, this would cause a compile error. Now the while loop respects the implicitly comptime-known condition, and the body is never analyzed.

See #667.

Syntax: Mandatory Function Return Type

-> is removed, and all functions require an explicit return type.
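For illustration, with a made-up add function:

// before this release
fn add(a: i32, b: i32) -> i32 {
    return a + b;
}

// now: no arrow, and the return type must be written explicitly
fn add(a: i32, b: i32) i32 {
    return a + b;
}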

The purpose of this is:

This has been a controversial change, and may be reverted.

Generating .h Files

Marc Tiehuis added array type handling:

const Foo = extern struct {
    A: [2]i32,
    B: [4]&u32,
};
export fn entry(foo: Foo, bar: [3]u8) void { }

This generates:

struct Foo {
    int32_t A[2];
    uint32_t * B[4];
};

TEST_EXPORT void entry(struct Foo foo, uint8_t bar[]);

Miscellaneous Improvements

Bug Fixes

This Release Contains Bugs

Zig has known bugs.

The first release that will ship with no known bugs will be 1.0.0.

Roadmap

Thank you contributors!

Thank you financial supporters!

Special thanks to those who donate monthly. We're now at $224 of the $3,000 goal. You're making Zig sustainable.