Skip to content

Commit

Permalink
Add atomics support to asmjs
Browse files Browse the repository at this point in the history
  • Loading branch information
Maqrkk committed Dec 18, 2023
1 parent 6292a94 commit 3b67315
Show file tree
Hide file tree
Showing 2 changed files with 255 additions and 6 deletions.
9 changes: 7 additions & 2 deletions llvm/include/llvm/Cheerp/Writer.h
Original file line number Diff line number Diff line change
Expand Up @@ -453,11 +453,12 @@ class CheerpWriter final : public CheerpBaseWriter
COMPILE_INSTRUCTION_FEEDBACK compileNotInlineableInstruction(const llvm::Instruction& I, PARENT_PRIORITY parentPrio);
COMPILE_INSTRUCTION_FEEDBACK compileInlineableInstruction(const llvm::Instruction& I, PARENT_PRIORITY parentPrio);
COMPILE_INSTRUCTION_FEEDBACK compileCallInstruction(const llvm::CallBase& I, PARENT_PRIORITY parentPrio);
void compileLoadElem(const llvm::Value* ptrOp, llvm::Type* Ty, llvm::StructType* STy, POINTER_KIND ptrKind, POINTER_KIND loadKind, bool isOffset, Registerize::REGISTER_KIND regKind, uint32_t structElemIdx, bool asmjs, PARENT_PRIORITY parentPrio);
void compileLoadElem(const llvm::Value* ptrOp, llvm::Type* Ty, POINTER_KIND ptrKind, POINTER_KIND loadKind, bool isOffset, Registerize::REGISTER_KIND regKind, bool asmjs, PARENT_PRIORITY parentPrio);
void compileLoadElem(const llvm::LoadInst& li, llvm::Type* Ty, llvm::StructType* STy, POINTER_KIND ptrKind, POINTER_KIND loadKind, bool isOffset, Registerize::REGISTER_KIND regKind, uint32_t structElemIdx, bool asmjs, PARENT_PRIORITY parentPrio);
void compileLoad(const llvm::LoadInst& li, PARENT_PRIORITY parentPrio);
void compileStoreElem(const llvm::StoreInst& si, llvm::Type* Ty, llvm::StructType* STy, POINTER_KIND ptrKind, POINTER_KIND storedKind, bool isOffset, Registerize::REGISTER_KIND regKind, uint32_t structElemIdx, uint32_t elemIdx, bool asmjs);
void compileStore(const llvm::StoreInst& si);
void compileAtomicRMW(const llvm::AtomicRMWInst& ai, PARENT_PRIORITY parentPrio);
void compileAtomicCmpXchg(const llvm::AtomicCmpXchgInst& ai, PARENT_PRIORITY parentPrio);

void compileSignedInteger(const llvm::Value* v, bool forComparison, PARENT_PRIORITY parentPrio);
void compileUnsignedInteger(const llvm::Value* v, bool forAsmJSComparison, PARENT_PRIORITY parentPrio, bool forceTruncation = false);
Expand Down Expand Up @@ -692,6 +693,10 @@ class CheerpWriter final : public CheerpBaseWriter
* Compile the function for growing the wasm linear memory
*/
void compileGrowMem();
/**
* Compile the atomic functions
*/
void compileAtomicFunctions();
/**
* Compile a helper function to assign all global heap symbols
*/
Expand Down
252 changes: 248 additions & 4 deletions llvm/lib/CheerpWriter/CheerpWriter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4355,6 +4355,18 @@ CheerpWriter::COMPILE_INSTRUCTION_FEEDBACK CheerpWriter::compileInlineableInstru
}
return COMPILE_OK;
}
case Instruction::AtomicRMW:
{
const AtomicRMWInst& ai = cast<AtomicRMWInst>(I);
compileAtomicRMW(ai, parentPrio);
return COMPILE_OK;
}
case Instruction::AtomicCmpXchg:
{
const AtomicCmpXchgInst& ai = cast<AtomicCmpXchgInst>(I);
compileAtomicCmpXchg(ai, parentPrio);
return COMPILE_OK;
}
default:
stream << "alert('Unsupported code')";
llvm::errs() << "\tImplement inst " << I.getOpcodeName() << '\n';
Expand Down Expand Up @@ -4413,7 +4425,7 @@ void CheerpWriter::compileLoad(const LoadInst& li, PARENT_PRIORITY parentPrio)
elemPtrKind = PA.getPointerKind(&li);
}
bool isOffset = ie.ptrIdx == 1;
compileLoadElem(ptrOp, Ty, STy, ptrKind, elemPtrKind, isOffset, elemRegKind, ie.structIdx, asmjs, parentPrio);
compileLoadElem(li, Ty, STy, ptrKind, elemPtrKind, isOffset, elemRegKind, ie.structIdx, asmjs, parentPrio);
if(needsCheckBounds)
{
needsCheckBounds = false;
Expand All @@ -4422,9 +4434,38 @@ void CheerpWriter::compileLoad(const LoadInst& li, PARENT_PRIORITY parentPrio)
}
}

void CheerpWriter::compileLoadElem(const Value* ptrOp, Type* Ty, StructType* STy, POINTER_KIND ptrKind, POINTER_KIND loadKind, bool isOffset, Registerize::REGISTER_KIND regKind, uint32_t structElemIdx, bool asmjs, PARENT_PRIORITY parentPrio)
void CheerpWriter::compileLoadElem(const LoadInst& li, Type* Ty, StructType* STy, POINTER_KIND ptrKind, POINTER_KIND loadKind, bool isOffset, Registerize::REGISTER_KIND regKind, uint32_t structElemIdx, bool asmjs, PARENT_PRIORITY parentPrio)
{
if(regKind==Registerize::INTEGER && needsIntCoercion(parentPrio))
const Value* ptrOp = li.getPointerOperand();
if (li.isAtomic())
{
assert(!STy);
assert(!isOffset);
PARENT_PRIORITY shiftPrio = SHIFT;
uint32_t shift = getHeapShiftForType(Ty);
if (shift == 0)
shiftPrio = LOWEST;
stream << namegen.getBuiltinName(NameGenerator::Builtin::ATOMICLOAD) << "(";
if (Ty->isIntegerTy(1) || Ty->isIntegerTy(8))
stream << "8,";
else if (Ty->isIntegerTy(16))
stream << "16,";
else if (Ty->isIntegerTy(32))
stream << "32,";
else if (Ty->isIntegerTy(64) && UseBigInts)
stream << "64,";
else
llvm::report_fatal_error("Unsupported bitwidth for atomic load");
compileRawPointer(ptrOp, shiftPrio);
if (shift != 0)
stream << ">>" << shift;
stream << ")";

if (li.getType()->isIntegerTy() && parentPrio != BIT_OR)
stream << "|0";
return ;
}
else if(regKind==Registerize::INTEGER && needsIntCoercion(parentPrio))
{
if (parentPrio > BIT_OR)
stream << '(';
Expand Down Expand Up @@ -4600,6 +4641,34 @@ void CheerpWriter::compileStoreElem(const StoreInst& si, Type* Ty, StructType* S
const Value* ptrOp=si.getPointerOperand();
const Value* valOp=si.getValueOperand();
assert(ptrKind != CONSTANT);
if (si.isAtomic())
{
assert(!STy);
assert(!isOffset);
stream << namegen.getBuiltinName(NameGenerator::Builtin::ATOMICSTORE) << "(";
Type* t = valOp->getType();
PARENT_PRIORITY shiftPrio = SHIFT;
uint32_t shift = getHeapShiftForType(t);
if (shift == 0)
shiftPrio = LOWEST;
if (t->isIntegerTy(1) || t->isIntegerTy(8))
stream << "8,";
else if (t->isIntegerTy(16))
stream << "16,";
else if (t->isIntegerTy(32))
stream << "32,";
else if (t->isIntegerTy(64) && UseBigInts)
stream << "64,";
else
llvm::report_fatal_error("Unsupported bitwidth for atomic store");
compileRawPointer(ptrOp, shiftPrio);
if (shift != 0)
stream << ">>" << shift;
stream << ",";
compileOperand(valOp, BIT_OR);
stream << "|0)";
return ;
}
if (RAW == ptrKind || (asmjs && ptrKind == CONSTANT))
{
assert(!isOffset);
Expand Down Expand Up @@ -4703,6 +4772,101 @@ void CheerpWriter::compileStoreElem(const StoreInst& si, Type* Ty, StructType* S
}
}

void CheerpWriter::compileAtomicRMW(const AtomicRMWInst& ai, PARENT_PRIORITY parentPrio)
{
switch(ai.getOperation())
{
case AtomicRMWInst::BinOp::Xchg:
stream << namegen.getBuiltinName(NameGenerator::Builtin::ATOMICXCHG) << "(";
break;
case AtomicRMWInst::BinOp::Add:
stream << namegen.getBuiltinName(NameGenerator::Builtin::ATOMICADD) << "(";
break;
case AtomicRMWInst::BinOp::Sub:
stream << namegen.getBuiltinName(NameGenerator::Builtin::ATOMICSUB) << "(";
break;
case AtomicRMWInst::BinOp::And:
stream << namegen.getBuiltinName(NameGenerator::Builtin::ATOMICAND) << "(";
break;
case AtomicRMWInst::BinOp::Or:
stream << namegen.getBuiltinName(NameGenerator::Builtin::ATOMICOR) << "(";
break;
case AtomicRMWInst::BinOp::Xor:
stream << namegen.getBuiltinName(NameGenerator::Builtin::ATOMICXOR) << "(";
break;
default:
llvm::report_fatal_error("Unsupported atomicrmw opcode");
}
const Value* ptrOp=ai.getPointerOperand();
const Value* valOp=ai.getValOperand();
Type* t = valOp->getType();
PARENT_PRIORITY shiftPrio = SHIFT;
uint32_t shift = getHeapShiftForType(t);
if (shift == 0)
shiftPrio = LOWEST;
if (t->isIntegerTy(1) || t->isIntegerTy(8))
stream << "8,";
else if (t->isIntegerTy(16))
stream << "16,";
else if (t->isIntegerTy(32))
stream << "32,";
else if (t->isIntegerTy(64) && UseBigInts)
stream << "64,";
else
llvm::report_fatal_error("Unsupported bitwidth for atomicrmw");
compileRawPointer(ptrOp, shiftPrio);
if (shift != 0)
stream << ">>" << shift;
stream << ",";
compileOperand(valOp, BIT_OR);
stream << "|0)";

if (ai.getType()->isIntegerTy() && parentPrio != BIT_OR)
stream << "|0";
}

void CheerpWriter::compileAtomicCmpXchg(const AtomicCmpXchgInst& ai, PARENT_PRIORITY parentPrio)
{
	// Compiles an LLVM cmpxchg to a call of the ATOMICCMPXCHG runtime helper
	// plus a comparison producing the "success" flag.
	// NOTE(review): parentPrio is unused here — the emitted code is a statement
	// sequence (it contains ';' and a newline), not a single embeddable expression.
	const Value* ptrOp = ai.getPointerOperand();
	const Value* cmpOp = ai.getCompareOperand();
	const Value* newValOp = ai.getNewValOperand();
	Type* t = newValOp->getType();
	PARENT_PRIORITY shiftPrio = SHIFT;
	uint32_t shift = getHeapShiftForType(t);
	if (shift == 0)
		shiftPrio = LOWEST;

	// We need to use the compare operand twice, so we load it into the second register first.
	StringRef reg2 = namegen.getName(&ai, 1);
	stream << "(" << reg2 << "=";
	// We end up using compileOperand on the cmpOp twice. But this is ok, since we make sure
	// that a compare operand to an AtomicCmpXchg instruction is never inlineable.
	compileOperand(cmpOp, BIT_OR);
	stream << "|0),";
	// Now we compile the main atomic compare exchange function. The first
	// argument is the bitwidth, used to select the typed heap view.
	stream << namegen.getBuiltinName(NameGenerator::Builtin::ATOMICCMPXCHG) << "(";
	if (t->isIntegerTy(1) || t->isIntegerTy(8))
		stream << "8,";
	else if (t->isIntegerTy(16))
		stream << "16,";
	else if (t->isIntegerTy(32))
		stream << "32,";
	else if (t->isIntegerTy(64) && UseBigInts)
		stream << "64,";
	else
		// Fixed diagnostic typo: was "atomicmpxchg".
		llvm::report_fatal_error("Unsupported bitwidth for atomiccmpxchg");
	compileRawPointer(ptrOp, shiftPrio);
	if (shift != 0)
		stream << ">>" << shift;
	stream << "," << reg2 << "|0,";
	compileOperand(newValOp, BIT_OR);
	stream << "|0)|0;" << NewLine;

	// Finally compile the second part of this instruction, the comparison between the loaded value
	// and the compare operand (which is in the second register for this instruction).
	stream << reg2 << "=(" << namegen.getName(&ai, 0) << "|0)==(" << reg2 << "|0)";
}

CheerpWriter::COMPILE_INSTRUCTION_FEEDBACK CheerpWriter::compileCallInstruction(const CallBase& ci, PARENT_PRIORITY parentPrio)
{
bool asmjs = currentFun->getSection() == StringRef("asmjs");
Expand Down Expand Up @@ -6008,6 +6172,64 @@ void CheerpWriter::compileGrowMem()
stream << "}" << NewLine;
}

void CheerpWriter::compileAtomicFunctions()
{
auto funcName = namegen.getBuiltinName(NameGenerator::Builtin::ATOMICLOAD);
stream << "function " << funcName << "(bitwidth, addr){" << NewLine;
stream << "if(bitwidth==8)" << NewLine;
stream << "return Atomics.load(" << getHeapName(HEAP8) << ", addr);" << NewLine;
stream << "else if(bitwidth==16)" << NewLine;
stream << "return Atomics.load(" << getHeapName(HEAP16) << ", addr);" << NewLine;
stream << "else if(bitwidth==32)" << NewLine;
stream << "return Atomics.load(" << getHeapName(HEAP32) << ", addr);" << NewLine;
if (UseBigInts)
{
stream << "else if(bitwidth==64)" << NewLine;
stream << "return Atomics.load(" << getHeapName(HEAP64) << ", addr);" << NewLine;
}
stream << "else " << NewLine;
stream << "throw new Error('Wrong bitwidth');" << NewLine;
stream << "}" << NewLine;
std::vector<std::string> opNames={"store","add","sub","and","or","xor","exchange"};
for (uint32_t i = 0; i < opNames.size(); i++)
{
auto b = static_cast<NameGenerator::Builtin>(i + NameGenerator::Builtin::ATOMICSTORE);
auto opName = opNames[i];
funcName = namegen.getBuiltinName(b);
stream << "function " << funcName << "(bitwidth, addr, val){" << NewLine;
stream << "if(bitwidth==8)" << NewLine;
stream << "return Atomics." << opName << "(" << getHeapName(HEAP8) << ", addr, val);" << NewLine;
stream << "else if(bitwidth==16)" << NewLine;
stream << "return Atomics." << opName << "(" << getHeapName(HEAP16) << ", addr, val);" << NewLine;
stream << "else if(bitwidth==32)" << NewLine;
stream << "return Atomics." << opName << "(" << getHeapName(HEAP32) << ", addr, val);" << NewLine;
if (UseBigInts)
{
stream << "else if(bitwidth==64)" << NewLine;
stream << "return Atomics." << opName << "(" << getHeapName(HEAP64) << ", addr, val);" << NewLine;
}
stream << "else " << NewLine;
stream << "throw new Error('Wrong bitwidth');" << NewLine;
stream << "}" << NewLine;
}
funcName = namegen.getBuiltinName(NameGenerator::Builtin::ATOMICCMPXCHG);
stream << "function " << funcName << "(bitwidth, addr, expected, replacement){" << NewLine;
stream << "if(bitwidth==8)" << NewLine;
stream << "return Atomics.compareExchange(" << getHeapName(HEAP8) << ", addr, expected, replacement);" << NewLine;
stream << "else if(bitwidth==16)" << NewLine;
stream << "return Atomics.compareExchange(" << getHeapName(HEAP16) << ", addr, expected, replacement);" << NewLine;
stream << "else if(bitwidth==32)" << NewLine;
stream << "return Atomics.compareExchange(" << getHeapName(HEAP32) << ", addr, expected, replacement);" << NewLine;
if (UseBigInts)
{
stream << "else if(bitwidth==64)" << NewLine;
stream << "return Atomics.compareExchange(" << getHeapName(HEAP64) << ", addr, expected, replacement);" << NewLine;
}
stream << "else " << NewLine;
stream << "throw new Error('Wrong bitwidth');" << NewLine;
stream << "}" << NewLine;
}

void CheerpWriter::compileMathDeclAsmJS()
{
stream << "var Infinity=stdlib.Infinity;" << NewLine;
Expand Down Expand Up @@ -6294,6 +6516,14 @@ void CheerpWriter::compileAsmJSClosure()
stream << namegen.getBuiltinName(NameGenerator::Builtin::GROW_MEM);
stream << ';' << NewLine;
}
if (globalDeps.usesAtomics())
{
for (int i = NameGenerator::Builtin::ATOMICLOAD; i <= NameGenerator::Builtin::ATOMICCMPXCHG; i++)
{
auto b = static_cast<NameGenerator::Builtin>(i);
stream << "var " << namegen.getBuiltinName(b) << "=ffi." << namegen.getBuiltinName(b) << ";" << NewLine;
}
}

// Declare globals
for ( const GlobalVariable* GV : linearHelper.globals() )
Expand Down Expand Up @@ -6337,14 +6567,25 @@ void CheerpWriter::compileAsmJSffiObject()
stream << namegen.getBuiltinName(NameGenerator::Builtin::GROW_MEM);
stream << ',' << NewLine;
}
if (globalDeps.usesAtomics())
{
for (int i = NameGenerator::Builtin::ATOMICLOAD; i <= NameGenerator::Builtin::ATOMICCMPXCHG; i++)
{
auto b = static_cast<NameGenerator::Builtin>(i);
stream << namegen.getBuiltinName(b) << ":" << namegen.getBuiltinName(b) << "," << NewLine;
}
}
stream << "}";
}

void CheerpWriter::compileAsmJSTopLevel()
{
compileDummies();

stream << "var __heap = new ArrayBuffer("<<heapSize*1024*1024<<");" << NewLine;
stream << "var __heap = new ";
if (globalDeps.usesAtomics())
stream << "Shared";
stream << "ArrayBuffer(" << heapSize * 1024 * 1024 << ");" << NewLine;
{
//Declare used HEAP variables as null, to be initialized by a later call to ASSIGN_HEAPS
bool isFirst = true;
Expand Down Expand Up @@ -6441,6 +6682,9 @@ void CheerpWriter::compileGenericJS()
//Compile growLinearMemory if needed
if (globalDeps.needsBuiltin(BuiltinInstr::BUILTIN::GROW_MEM))
compileGrowMem();

if (globalDeps.usesAtomics())
compileAtomicFunctions();
}

void CheerpWriter::compileDummies()
Expand Down

0 comments on commit 3b67315

Please sign in to comment.