Tweaks to improve cleanliness of code; removes warnings that appear in Eclipse #11

Open · wants to merge 5 commits into main

Changes from all commits
2 changes: 2 additions & 0 deletions .gitignore
@@ -0,0 +1,2 @@
+/.classpath
+/.project
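(.classpath and .project are per-machine project files that Eclipse generates when a project is imported, so ignoring them is the conventional companion to the Eclipse-oriented cleanups below.)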
86 changes: 45 additions & 41 deletions Llama3.java
File mode changed: 100755 → 100644
@@ -85,44 +85,45 @@ static void runInteractive(Llama model, Sampler sampler, Options options) {
             conversationTokens.addAll(chatFormat.encodeMessage(new ChatFormat.Message(ChatFormat.Role.SYSTEM, options.systemPrompt())));
         }
         int startPosition = 0;
-        Scanner in = new Scanner(System.in);
-        while (true) {
-            System.out.print("> ");
-            System.out.flush();
-            String userText = in.nextLine();
-            if (List.of("quit", "exit").contains(userText)) {
-                break;
-            }
-            if (state == null) {
-                state = model.createNewState();
-            }
-            conversationTokens.addAll(chatFormat.encodeMessage(new ChatFormat.Message(ChatFormat.Role.USER, userText)));
-            conversationTokens.addAll(chatFormat.encodeHeader(new ChatFormat.Message(ChatFormat.Role.ASSISTANT, "")));
-            Set<Integer> stopTokens = chatFormat.getStopTokens();
-            List<Integer> responseTokens = Llama.generateTokens(model, state, startPosition, conversationTokens.subList(startPosition, conversationTokens.size()), stopTokens, options.maxTokens(), sampler, options.echo(), token -> {
-                if (options.stream()) {
-                    if (!model.tokenizer().isSpecialToken(token)) {
-                        System.out.print(model.tokenizer().decode(List.of(token)));
-                    }
-                }
-            });
-            // Include stop token in the prompt history, but not in the response displayed to the user.
-            conversationTokens.addAll(responseTokens);
-            startPosition = conversationTokens.size();
-            Integer stopToken = null;
-            if (!responseTokens.isEmpty() && stopTokens.contains(responseTokens.getLast())) {
-                stopToken = responseTokens.getLast();
-                responseTokens.removeLast();
-            }
-            if (!options.stream()) {
-                String responseText = model.tokenizer().decode(responseTokens);
-                System.out.println(responseText);
-            }
-            if (stopToken == null) {
-                System.err.println("Ran out of context length...");
-                break;
-            }
-        }
+        try (Scanner in = new Scanner(System.in)) {
+            while (true) {
+                System.out.print("> ");
+                System.out.flush();
+                String userText = in.nextLine();
+                if (List.of("quit", "exit").contains(userText)) {
+                    break;
+                }
+                if (state == null) {
+                    state = model.createNewState();
+                }
+                conversationTokens.addAll(chatFormat.encodeMessage(new ChatFormat.Message(ChatFormat.Role.USER, userText)));
+                conversationTokens.addAll(chatFormat.encodeHeader(new ChatFormat.Message(ChatFormat.Role.ASSISTANT, "")));
+                Set<Integer> stopTokens = chatFormat.getStopTokens();
+                List<Integer> responseTokens = Llama.generateTokens(model, state, startPosition, conversationTokens.subList(startPosition, conversationTokens.size()), stopTokens, options.maxTokens(), sampler, options.echo(), token -> {
+                    if (options.stream()) {
+                        if (!model.tokenizer().isSpecialToken(token)) {
+                            System.out.print(model.tokenizer().decode(List.of(token)));
+                        }
+                    }
+                });
+                // Include stop token in the prompt history, but not in the response displayed to the user.
+                conversationTokens.addAll(responseTokens);
+                startPosition = conversationTokens.size();
+                Integer stopToken = null;
+                if (!responseTokens.isEmpty() && stopTokens.contains(responseTokens.getLast())) {
+                    stopToken = responseTokens.getLast();
+                    responseTokens.removeLast();
+                }
+                if (!options.stream()) {
+                    String responseText = model.tokenizer().decode(responseTokens);
+                    System.out.println(responseText);
+                }
+                if (stopToken == null) {
+                    System.err.println("Ran out of context length...");
+                    break;
+                }
+            }
+        }
     }

     static void runInstructOnce(Llama model, Sampler sampler, Options options) {
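For context: the Eclipse warning this hunk removes is the resource-leak diagnostic on the Scanner (Eclipse reports something like "Resource leak: 'in' is never closed"). Scanner implements AutoCloseable, so the try-with-resources form guarantees it is closed on break, return, or exception. A minimal, self-contained sketch of the same pattern, with illustrative class and prompt names not taken from the PR:

import java.util.Scanner;

class ScannerLoopSketch {
    public static void main(String[] args) {
        // Scanner implements AutoCloseable; try-with-resources closes it
        // (and the System.in wrapper it owns) on every exit path, which is
        // what silences the Eclipse resource-leak warning.
        try (Scanner in = new Scanner(System.in)) {
            while (true) {
                System.out.print("> ");
                String line = in.nextLine();
                if (line.equals("quit") || line.equals("exit")) {
                    break;
                }
                System.out.println("echo: " + line);
            }
        }
    }
}

The one behavioral difference from the old version is that System.in is closed when the loop exits, which is harmless for a CLI that terminates right after the loop.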
@@ -346,7 +347,8 @@ public int byteSize() {
         }
     }

-    private void loadModelImpl(FileChannel fileChannel) throws IOException {
+    @SuppressWarnings("preview")
+    private void loadModelImpl(FileChannel fileChannel) throws IOException {
         // The header of the file.
         readHeader(fileChannel); // gguf_header_t header;
         // Tensor infos, which can be used to locate the tensor data.
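On the JDKs this project targets, parts of the API it uses were still preview features (the java.lang.foreign memory API, used for memory-mapping model files, was preview before JDK 22), and Eclipse flags every use of a preview API. The "preview" token is the Eclipse JDT way to suppress that diagnostic at method granularity; javac's own --enable-preview note is mandatory and unaffected by the annotation. A minimal sketch, with an illustrative method that is not from the PR:

import java.lang.foreign.MemorySegment;
import java.lang.foreign.ValueLayout;

class PreviewSketch {
    // On a JDK where java.lang.foreign is still a preview API, Eclipse marks
    // every reference in this method; @SuppressWarnings("preview") hides
    // those markers for the annotated method only.
    @SuppressWarnings("preview")
    static float firstFloat(MemorySegment segment) {
        return segment.get(ValueLayout.JAVA_FLOAT, 0L);
    }
}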
@@ -726,7 +728,7 @@ private static Tokenizer createTokenizer(Map<String, Object> metadata, Vocabular

         int allTokens = vocabulary.size();
         int baseTokens = 128000; // assume all tokens after the base ones are special.
-        int reservedSpecialTokens = allTokens - baseTokens;
+
         List<String> specialTokensList = Arrays.stream(vocabulary.tokens(), baseTokens, allTokens).toList();

         assert specialTokensList.stream().allMatch(token -> vocabulary.getIndex(token).isPresent());
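The deleted reservedSpecialTokens local was assigned but never read, which is exactly the Eclipse "the value of the local variable is not used" warning; removing it changes no behavior. The line that stays uses the three-argument Arrays.stream(array, fromInclusive, toExclusive) overload to stream the special-token tail of the vocabulary without copying it first. A tiny illustration with made-up tokens:

import java.util.Arrays;
import java.util.List;

class VocabSliceSketch {
    public static void main(String[] args) {
        String[] tokens = {"the", "cat", "sat", "<|begin_of_text|>", "<|eot_id|>"};
        int baseTokens = 3; // pretend everything from index 3 on is special
        // Streams only the sub-range [baseTokens, tokens.length) of the array.
        List<String> special = Arrays.stream(tokens, baseTokens, tokens.length).toList();
        System.out.println(special); // prints [<|begin_of_text|>, <|eot_id|>]
    }
}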
@@ -1868,9 +1870,11 @@ public static Pair<float[], float[]> precomputeFreqsCis(int contextLength, int h
             float loFreqWavelen = oldContextLength / loFreqFactor;
             float hiFreqWavelen = oldContextLength / hiFreqFactor;
             float wavelen = (float) (2.0 * Math.PI / freq);
+
+            //This doesn't do anything, so it triggers a warning.
             if (wavelen < hiFreqWavelen) {
                 freq = freq;
-            } else if (wavelen > loFreqWavelen) {
+            } else if (wavelen > loFreqWavelen) {
                 freq = freq / scaleFactor;
             } else {
                 float smooth = (oldContextLength / wavelen - loFreqFactor) / (hiFreqFactor - loFreqFactor);
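(The paired -/+ lines above render identically; the change is presumably whitespace-only.) The freq = freq; self-assignment is dead code, which is what Eclipse warns about; this PR keeps the branch and just documents it with the added comment. An alternative that removes the warning outright would be to let the unchanged case fall through, as in this sketch (same boundary behavior; the interpolation body is elided here exactly as it is in the diff):

// wavelen < hiFreqWavelen: freq stays as-is, so no branch is needed for it.
if (wavelen > loFreqWavelen) {
    freq = freq / scaleFactor;
} else if (wavelen >= hiFreqWavelen) {
    float smooth = (oldContextLength / wavelen - loFreqFactor) / (hiFreqFactor - loFreqFactor);
    // ... same smooth-interpolation update of freq as in the original else branch
}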