commit 443b1e8 (1 parent: 08cee6b)
jit: Initial x64 implementation.
fubark committed Dec 8, 2023

Showing 9 changed files with 1,175 additions and 140 deletions.
src/chunk.zig: 3 additions & 0 deletions

@@ -13,6 +13,7 @@ const llvm = @import("llvm.zig");
 const llvm_gen = @import("llvm_gen.zig");
 const bc_gen = @import("bc_gen.zig");
 const jitgen = @import("jit/gen.zig");
+const X64 = @import("jit/x64.zig");
 
 pub const ChunkId = u32;
 
@@ -129,6 +130,7 @@ pub const Chunk = struct {
     /// Shared final code buffer.
     buf: *cy.ByteCodeBuffer,
     jitBuf: *jitgen.CodeBuffer,
+    x64Enc: X64.Encoder,
 
     nodes: []cy.Node,
     tokens: []const cy.Token,
@@ -216,6 +218,7 @@ pub const Chunk = struct {
         .curObjectSym = null,
         .buf = undefined,
         .jitBuf = undefined,
+        .x64Enc = undefined,
         .curNodeId = cy.NullId,
         .symInitDeps = .{},
         .symInitInfos = .{},
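A note on the new x64Enc field: src/jit/x64.zig is one of the changed files not shown in this excerpt, so the Encoder's actual API is unknown here. Below is a rough, hypothetical sketch of what an encoder in this role typically provides; all names and the test are illustrative, and only the `mov r64, imm64` encoding (REX.W + B8+rd io) is a documented x86-64 fact.

const std = @import("std");

// Hypothetical sketch; not the actual X64.Encoder from src/jit/x64.zig.
const Encoder = struct {
    buf: std.ArrayList(u8),

    fn init(alloc: std.mem.Allocator) Encoder {
        return .{ .buf = std.ArrayList(u8).init(alloc) };
    }

    // Emit `mov r64, imm64` (REX.W + B8+rd io).
    fn movRegImm64(self: *Encoder, reg: u4, imm: u64) !void {
        const r: u8 = reg;
        try self.buf.append(0x48 | (r >> 3)); // REX.W; REX.B selects r8-r15.
        try self.buf.append(0xB8 | (r & 0x7));
        var i: usize = 0;
        while (i < 8) : (i += 1) { // Immediate in little-endian byte order.
            try self.buf.append(@truncate(imm >> @intCast(i * 8)));
        }
    }
};

test "encode mov rax, 1" {
    var enc = Encoder.init(std.testing.allocator);
    defer enc.buf.deinit();
    try enc.movRegImm64(0, 1); // rax is register number 0.
    try std.testing.expectEqualSlices(
        u8,
        &.{ 0x48, 0xB8, 1, 0, 0, 0, 0, 0, 0, 0 },
        enc.buf.items,
    );
}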
src/jit/a64.zig: 2 additions & 2 deletions

@@ -227,8 +227,8 @@ pub const BrCond = packed struct {
         return @bitCast(self);
     }
 
-    pub fn init(cond: Cond, imm: u19) BrCond {
-        return .{ .cond = @intFromEnum(cond), .imm19 = imm };
+    pub fn init(cond: Cond, imm: i19) BrCond {
+        return .{ .cond = @intFromEnum(cond), .imm19 = @bitCast(imm) };
     }
 };
 
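The u19 to i19 change matters because A64 conditional branches take a signed, word-scaled offset: accepting i19 lets callers pass backward (negative) jumps, and @bitCast reinterprets the two's complement value into the raw imm19 field. A standalone illustration of that reinterpretation, with made-up values:

const std = @import("std");

test "i19 to u19 bitcast keeps the two's complement pattern" {
    const offset: i19 = -4; // e.g. a branch 4 instructions backward.
    const raw: u19 = @bitCast(offset);
    // 2^19 - 4 = 524284; the CPU sign-extends the 19-bit field on decode.
    try std.testing.expectEqual(@as(u19, 524284), raw);
}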
src/jit/a64_assembler.zig: 66 additions & 16 deletions

@@ -3,35 +3,61 @@ const stdx = @import("stdx");
 const cy = @import("../cyber.zig");
 const t = stdx.testing;
 const Slot = cy.register.RegisterId;
-const sasm = @import("assembler.zig");
+const assm = @import("assembler.zig");
 const A64 = @import("a64.zig");
-const VRegister = sasm.VRegister;
+const LRegister = assm.LRegister;
 const Register = A64.Register;
 const gen = @import("gen.zig");
 
 pub const FpReg: A64.Register = .x1;
 
-pub fn genLoadSlot(c: *cy.Chunk, dst: VRegister, src: Slot) !void {
-    try c.jitPushU32(A64.LoadStore.ldrImmOff(FpReg, src, fromVReg(dst)).bitCast());
+pub fn genLoadSlot(c: *cy.Chunk, dst: LRegister, src: Slot) !void {
+    try c.jitPushU32(A64.LoadStore.ldrImmOff(FpReg, src, toReg(dst)).bitCast());
 }
 
-pub fn genStoreSlot(c: *cy.Chunk, dst: Slot, src: VRegister) !void {
-    try c.jitPushU32(A64.LoadStore.strImmOff(FpReg, dst, fromVReg(src)).bitCast());
+pub fn genStoreSlot(c: *cy.Chunk, dst: Slot, src: LRegister) !void {
+    try c.jitPushU32(A64.LoadStore.strImmOff(FpReg, dst, toReg(src)).bitCast());
 }
 
-pub fn genAddImm(c: *cy.Chunk, dst: VRegister, src: VRegister, imm: u64) !void {
-    try c.jitPushU32(A64.AddSubImm.add(fromVReg(dst), fromVReg(src), @intCast(imm)).bitCast());
+pub fn genAddImm(c: *cy.Chunk, dst: LRegister, src: LRegister, imm: u64) !void {
+    try c.jitPushU32(A64.AddSubImm.add(toReg(dst), toReg(src), @intCast(imm)).bitCast());
 }
 
-pub fn genMovImm(c: *cy.Chunk, dst: VRegister, imm: u64) !void {
-    try copyImm64(c, fromVReg(dst), imm);
+pub fn genMovImm(c: *cy.Chunk, dst: LRegister, imm: u64) !void {
+    try copyImm64(c, toReg(dst), imm);
 }
 
-pub fn genMovPcRel(c: *cy.Chunk, dst: VRegister, offset: i32) !void {
-    try c.jitPushU32(A64.PcRelAddr.adr(fromVReg(dst), @intCast(offset)).bitCast());
+pub fn genPatchableJumpRel(c: *cy.Chunk) !void {
+    try c.jitPushU32(A64.BrImm.bl(0).bitCast());
 }
 
-pub fn patchMovPcRelTo(c: *cy.Chunk, pc: usize, to: usize) !void {
+pub fn patchJumpRel(c: *cy.Chunk, pc: usize, to: usize) void {
+    var inst: *A64.BrImm = @ptrCast(@alignCast(&c.jitBuf.buf.items[pc]));
+    inst.setOffsetFrom(pc, to);
+}
+
+pub fn genCmp(c: *cy.Chunk, left: LRegister, right: LRegister) !void {
+    try c.jitPushU32(A64.AddSubShifted.cmp(toReg(left), toReg(right)).bitCast());
+}
+
+pub fn genJumpCond(c: *cy.Chunk, cond: assm.LCond, offset: i32) !void {
+    try c.jitPushU32(A64.BrCond.init(toCond(cond), offset).bitCast());
+}
+
+pub fn patchJumpCond(c: *cy.Chunk, pc: usize, to: usize) void {
+    const inst = c.jitGetA64Inst(pc, A64.BrCond);
+    inst.imm19 = @intCast((to - pc) >> 2);
+}
+
+pub fn genMovPcRel(c: *cy.Chunk, dst: LRegister, to: usize) !void {
+    try c.jitPushU32(A64.PcRelAddr.adrFrom(toReg(dst), c.jitGetPos(), to).bitCast());
+}
+
+pub fn genPatchableMovPcRel(c: *cy.Chunk, dst: LRegister) !void {
+    try c.jitPushU32(A64.PcRelAddr.adr(toReg(dst), 0).bitCast());
+}
+
+pub fn patchMovPcRelTo(c: *cy.Chunk, pc: usize, to: usize) void {
     const adr = c.jitGetA64Inst(pc, A64.PcRelAddr);
     adr.setOffsetFrom(pc, to);
 }
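patchJumpRel and patchMovPcRelTo above rewrite previously emitted placeholder instructions: a pointer into the code buffer is reinterpreted as a packed instruction struct, and its offset field is updated once the target is known. A self-contained sketch of that pattern, using a made-up 32-bit layout rather than the real A64.BrImm:

const std = @import("std");

// Made-up instruction layout standing in for A64.BrImm (bl imm26).
const Inst = packed struct(u32) {
    imm26: u26,
    opc: u6,
};

test "patch an emitted instruction word in place" {
    var code = [_]u32{@bitCast(Inst{ .imm26 = 0, .opc = 0b100101 })};
    const inst: *Inst = @ptrCast(&code[0]);
    // A64 branch offsets are word-scaled: byte distance divided by 4.
    const pc: usize = 0;
    const to: usize = 64;
    inst.imm26 = @intCast((to - pc) >> 2);
    try std.testing.expectEqual(@as(u26, 16), inst.imm26);
}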
@@ -49,6 +75,23 @@ pub fn genMainReturn(c: *cy.Chunk) !void {
     try c.jitPushU32(A64.Br.ret().bitCast());
 }
 
+pub fn genCallFunc(c: *cy.Chunk, ret: Slot, func: *cy.Func) !void {
+    // Skip ret info.
+    // Skip bc pc slot.
+    try genStoreSlot(c, ret + 3, .fp);
+
+    // Advance fp.
+    try genAddImm(c, .fp, .fp, 8 * ret);
+
+    // Push empty branch.
+    const jumpPc = c.jitGetPos();
+    try c.jitBuf.relocs.append(c.alloc, .{ .type = .jumpToFunc, .data = .{ .jumpToFunc = .{
+        .func = func,
+        .pc = @intCast(jumpPc),
+    }}});
+    try assm.genPatchableJumpRel(c);
+}
+
 pub fn genCallFuncPtr(c: *cy.Chunk, ptr: *const anyopaque) !void {
     // No reloc needed, copy address to x30 (since it's already spilled) and invoke with blr.
     try copyImm64(c, .x30, @intFromPtr(ptr));
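genCallFunc cannot know the callee's address yet, so it emits a placeholder bl and records a jumpToFunc relocation; after codegen, each recorded pc is patched to the resolved target (patchJumpRel above). A minimal sketch of that record-then-patch flow, with illustrative stand-ins for the reloc list and patcher rather than the actual cy.Chunk API:

const std = @import("std");

// Illustrative reloc record; the real list is c.jitBuf.relocs.
const Reloc = struct {
    pc: usize, // Byte position of the placeholder bl.
    target: usize, // Resolved callee entry, known after codegen.
};

fn patchJumpRel(code: []u32, pc: usize, to: usize) void {
    // Rewrite the low 26 bits of the bl with the signed word offset.
    const words: i32 = @intCast((@as(isize, @intCast(to)) - @as(isize, @intCast(pc))) >> 2);
    const imm26: u26 = @truncate(@as(u32, @bitCast(words)));
    code[pc / 4] = (code[pc / 4] & ~@as(u32, 0x03FF_FFFF)) | imm26;
}

test "placeholder branches are resolved in a final pass" {
    var code = [_]u32{ 0x9400_0000, 0xD503_201F }; // bl 0; nop
    const relocs = [_]Reloc{.{ .pc = 0, .target = 8 }};
    for (relocs) |r| patchJumpRel(&code, r.pc, r.target);
    try std.testing.expectEqual(@as(u32, 0x9400_0002), code[0]);
}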
@@ -70,13 +113,20 @@ pub fn genBreakpoint(c: *cy.Chunk) !void {
     try c.jitPushU32(A64.Exception.brk(0xf000).bitCast());
 }
 
-fn fromVReg(arg: VRegister) Register {
-    return switch (arg) {
+fn toCond(cond: assm.LCond) A64.Cond {
+    return switch (cond) {
+        .ge => .ge,
+        else => unreachable,
+    };
+}
+
+fn toReg(reg: LRegister) Register {
+    return switch (reg) {
         .arg0 => .x2,
         .arg1 => .x3,
         .arg2 => .x4,
         .arg3 => .x5,
-        .fp => .x1,
+        .fp => FpReg,
         .temp => .x8,
     };
 }
src/jit/assembler.zig: 83 additions & 9 deletions

@@ -3,11 +3,13 @@ const cy = @import("../cyber.zig");
 const Slot = cy.register.RegisterId;
 
 const a64 = @import("a64_assembler.zig");
+const x64 = @import("x64_assembler.zig");
 
 /// Provides a common interface for assembling machine code related to stencils.
 /// Most machine code is still being generated from stencils.
 
-pub const VRegister = enum {
+/// Logical register.
+pub const LRegister = enum {
     fp,
     arg0,
     arg1,
Expand All @@ -16,51 +18,111 @@ pub const VRegister = enum {
temp,
};

pub fn genLoadSlot(c: *cy.Chunk, dst: VRegister, src: Slot) !void {
pub const LCond = enum(u8) {
ge,
_,
};

pub fn genLoadSlot(c: *cy.Chunk, dst: LRegister, src: Slot) !void {
switch (builtin.cpu.arch) {
.aarch64 => try a64.genLoadSlot(c, dst, src),
.x86_64 => try x64.genLoadSlot(c, dst, src),
else => return error.Unsupported,
}
}

pub fn genStoreSlot(c: *cy.Chunk, dst: Slot, src: VRegister) !void {
pub fn genStoreSlot(c: *cy.Chunk, dst: Slot, src: LRegister) !void {
switch (builtin.cpu.arch) {
.aarch64 => try a64.genStoreSlot(c, dst, src),
.x86_64 => try x64.genStoreSlot(c, dst, src),
else => return error.Unsupported,
}
}

pub fn genAddImm(c: *cy.Chunk, dst: VRegister, src: VRegister, imm: u64) !void {
pub fn genAddImm(c: *cy.Chunk, dst: LRegister, src: LRegister, imm: u64) !void {
switch (builtin.cpu.arch) {
.aarch64 => try a64.genAddImm(c, dst, src, imm),
.x86_64 => try x64.genAddImm(c, dst, src, imm),
else => return error.Unsupported,
}
}

pub fn genMovImm(c: *cy.Chunk, dst: VRegister, imm: u64) !void {
pub fn genMovImm(c: *cy.Chunk, dst: LRegister, imm: u64) !void {
switch (builtin.cpu.arch) {
.aarch64 => try a64.genMovImm(c, dst, imm),
.x86_64 => try x64.genMovImm(c, dst, imm),
else => return error.Unsupported,
}
}

pub fn genJumpCond(c: *cy.Chunk, cond: LCond, offset: i32) !void {
switch (builtin.cpu.arch) {
.aarch64 => try a64.genJumpCond(c, cond, offset),
.x86_64 => try x64.genJumpCond(c, cond, offset),
else => return error.Unsupported,
}
}

pub fn patchJumpCond(c: *cy.Chunk, pc: usize, to: usize) void {
switch (builtin.cpu.arch) {
.aarch64 => a64.patchJumpCond(c, pc, to),
.x86_64 => x64.patchJumpCond(c, pc, to),
else => unreachable,
}
}

pub fn genPatchableJumpRel(c: *cy.Chunk) !void {
switch (builtin.cpu.arch) {
.aarch64 => try a64.genPatchableJumpRel(c),
.x86_64 => try x64.genPatchableJumpRel(c),
else => return error.Unsupported,
}
}

pub fn patchJumpRel(c: *cy.Chunk, pc: usize, to: usize) void {
switch (builtin.cpu.arch) {
.aarch64 => a64.patchJumpRel(c, pc, to),
.x86_64 => x64.patchJumpRel(c, pc, to),
else => unreachable,
}
}

pub fn genCmp(c: *cy.Chunk, left: LRegister, right: LRegister) !void {
switch (builtin.cpu.arch) {
.aarch64 => try a64.genCmp(c, left, right),
.x86_64 => try x64.genCmp(c, left, right),
else => return error.Unsupported,
}
}

pub fn genMovPcRel(c: *cy.Chunk, dst: VRegister, offset: i32) !void {
pub fn genMovPcRel(c: *cy.Chunk, dst: LRegister, to: usize) !void {
switch (builtin.cpu.arch) {
.aarch64 => try a64.genMovPcRel(c, dst, offset),
.aarch64 => try a64.genMovPcRel(c, dst, to),
.x86_64 => try x64.genMovPcRel(c, dst, to),
else => return error.Unsupported,
}
}

pub fn patchMovPcRelTo(c: *cy.Chunk, pc: usize, to: usize) !void {
pub fn genPatchableMovPcRel(c: *cy.Chunk, dst: LRegister) !void {
switch (builtin.cpu.arch) {
.aarch64 => try a64.patchMovPcRelTo(c, pc, to),
.aarch64 => try a64.genPatchableMovPcRel(c, dst),
.x86_64 => try x64.genPatchableMovPcRel(c, dst),
else => return error.Unsupported,
}
}

pub fn patchMovPcRelTo(c: *cy.Chunk, pc: usize, to: usize) void {
switch (builtin.cpu.arch) {
.aarch64 => a64.patchMovPcRelTo(c, pc, to),
.x86_64 => x64.patchMovPcRelTo(c, pc, to),
else => unreachable,
}
}

pub fn genStoreSlotImm(c: *cy.Chunk, dst: Slot, imm: u64) !void {
switch (builtin.cpu.arch) {
.aarch64 => try a64.genStoreSlotImm(c, dst, imm),
.x86_64 => try x64.genStoreSlotImm(c, dst, imm),
else => return error.Unsupported,
}
}
@@ -72,27 +134,39 @@ pub fn genStoreSlotValue(c: *cy.Chunk, dst: Slot, val: cy.Value) !void {
 pub fn genBreakpoint(c: *cy.Chunk) !void {
     switch (builtin.cpu.arch) {
         .aarch64 => try a64.genBreakpoint(c),
+        .x86_64 => try x64.genBreakpoint(c),
         else => return error.Unsupported,
     }
 }
 
+pub fn genCallFunc(c: *cy.Chunk, ret: Slot, func: *cy.Func) !void {
+    switch (builtin.cpu.arch) {
+        .aarch64 => try a64.genCallFunc(c, ret, func),
+        .x86_64 => try x64.genCallFunc(c, ret, func),
+        else => return error.Unsupported,
+    }
+}
+
 pub fn genCallFuncPtr(c: *cy.Chunk, ptr: *const anyopaque) !void {
     switch (builtin.cpu.arch) {
         .aarch64 => try a64.genCallFuncPtr(c, ptr),
+        .x86_64 => try x64.genCallFuncPtr(c, ptr),
         else => return error.Unsupported,
     }
 }
 
 pub fn genFuncReturn(c: *cy.Chunk) !void {
     switch (builtin.cpu.arch) {
         .aarch64 => try a64.genFuncReturn(c),
+        .x86_64 => try x64.genFuncReturn(c),
         else => return error.Unsupported,
     }
 }
 
 pub fn genMainReturn(c: *cy.Chunk) !void {
     switch (builtin.cpu.arch) {
         .aarch64 => try a64.genMainReturn(c),
+        .x86_64 => try x64.genMainReturn(c),
         else => return error.Unsupported,
     }
 }
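A note on the structure of assembler.zig: every wrapper switches on builtin.cpu.arch, which is comptime-known, so each build resolves to a direct call into exactly one backend with no runtime dispatch; unsupported targets fall through to error.Unsupported (or unreachable for the patch functions, which cannot fail). A freestanding illustration of the pattern, with dummy backend functions standing in for the a64/x64 modules:

const std = @import("std");
const builtin = @import("builtin");

fn a64Emit(buf: *std.ArrayList(u8)) !void {
    try buf.append(0xd4); // Stand-in for an A64 instruction byte.
}

fn x64Emit(buf: *std.ArrayList(u8)) !void {
    try buf.append(0xcc); // Stand-in for an x64 instruction byte (int3).
}

// builtin.cpu.arch is a comptime constant, so the switch is resolved
// at compile time and only the matching backend is compiled in.
pub fn emit(buf: *std.ArrayList(u8)) !void {
    switch (builtin.cpu.arch) {
        .aarch64 => try a64Emit(buf),
        .x86_64 => try x64Emit(buf),
        else => return error.Unsupported,
    }
}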
[Diffs for the remaining 5 changed files were not loaded in this capture.]