Skip to content

Commit

Permalink
[IN PROGRESS] update mock prompt & response
Browse files Browse the repository at this point in the history
  • Loading branch information
ntqdinh-axonivy committed Nov 19, 2024
1 parent ea2b583 commit d1f9c21
Show file tree
Hide file tree
Showing 6 changed files with 152 additions and 3 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,11 @@ public class MockAI {
"completions", json(load("completions.json")),
"completions-response", json(load("completions-response.json")),
"mail-generator", json(load("mail-generator.json")),
"mail-generator-response", json(load("mail-generator-response.json"))
);
"mail-generator-response", json(load("mail-generator-response.json")),
"assist-ask-without-system-promt", json(load("assist-ask-without-system-promt.json")),
"assist-ask-without-system-promt-response", json(load("assist-ask-without-system-promt-response.json")),
"assist-insert-with-system-promt", json(load("assist-insert-with-system-promt.json")),
"assist-insert-with-system-promt-response", json(load("assist-insert-with-system-promt-response.json")));

@POST
@Path("completions")
Expand Down Expand Up @@ -89,13 +92,15 @@ private String input(JsonNode request, Map<String, JsonNode> examples) {
return null;
}

@POST
@POST
@Path("chat/completions")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response chat(JsonNode request) {
Ivy.log().warn(request.toPrettyString());
var in = input(request, openAIExamples);
var node = openAIExamples.get(in+"-response");
Ivy.log().warn(request);
return Response.ok()
.entity(node)
.build();
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
{
"id": "chatcmpl-AVHUYbcl2WKpRj0oaLrZm8yNXosBQ",
"object": "chat.completion",
"created": 1732018770,
"model": "gpt-3.5-turbo-0125",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Sure, here is a combobox for you to pick a brand out of Mercedes, BMW, or Tesla:\n\n<select>\n <option value=\"mercedes\">Mercedes</option>\n <option value=\"bmw\">BMW</option>\n <option value=\"tesla\">Tesla</option>\n</select>",
"refusal": null
},
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 28,
"completion_tokens": 64,
"total_tokens": 92,
"prompt_tokens_details": {
"cached_tokens": 0,
"audio_tokens": 0
},
"completion_tokens_details": {
"reasoning_tokens": 0,
"audio_tokens": 0,
"accepted_prediction_tokens": 0,
"rejected_prediction_tokens": 0
}
},
"system_fingerprint": null
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
{
"model": "gpt-3.5-turbo",
"max_tokens": 1024,
"temperature": 1,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"messages": [
{
"role": "user",
"content": "insert a combobox to pick a brand out of: Mercedes, BMW or Tesla"
}
]
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
{
"id": "chatcmpl-AVHNuLIeqrGBUWPhDez363Gs5akFe",
"object": "chat.completion",
"created": 1732018358,
"model": "gpt-3.5-turbo-0125",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "<select id=\"brand-select\">\n <option value=\"mercedes\">Mercedes</option>\n <option value=\"bmw\">BMW</option>\n <option value=\"tesla\">Tesla</option>\n</select>",
"refusal": null
},
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 179,
"completion_tokens": 46,
"total_tokens": 225,
"prompt_tokens_details": {
"cached_tokens": 0,
"audio_tokens": 0
},
"completion_tokens_details": {
"reasoning_tokens": 0,
"audio_tokens": 0,
"accepted_prediction_tokens": 0,
"rejected_prediction_tokens": 0
}
},
"system_fingerprint": null
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
{
"model": "gpt-3.5-turbo",
"max_tokens": 1024,
"temperature": 1,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"messages": [
{
"role": "system",
"content": "SYSTEM_PROMT"
},
{
"role": "user",
"content": "insert a combobox to pick a brand out of: Mercedes, BMW or Tesla"
}
]
}
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,9 @@
import org.junit.jupiter.api.Test;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;

import ch.ivyteam.ivy.application.IApplication;
import ch.ivyteam.ivy.environment.AppFixture;
Expand Down Expand Up @@ -85,6 +88,13 @@ void mailGenerator() {
.isNotEmpty();
}

@Test
void askWithOutSystemPromt() {
  // Ask the mocked assistant a plain user question (no system prompt) and
  // expect the mock to hand back a non-empty JSON response.
  JsonNode answer = assistWithQuestion("insert a combobox to pick a brand out of: Mercedes, BMW or Tesla", false);
  assertThat(answer.toPrettyString()).isNotEmpty();
}

private static JsonNode assist(JsonNode quest) {
WebTarget client = Ivy.rest().client(OPEN_AI);
Entity<JsonNode> request = Entity.entity(quest, MediaType.APPLICATION_JSON);
Expand All @@ -100,5 +110,39 @@ private static JsonNode chatAssist(JsonNode quest) {
.post(request).readEntity(JsonNode.class);
return result;
}

/**
 * Sends a chat-completion request for the given question to the mocked
 * OpenAI REST client and returns the raw JSON response.
 *
 * @param question the user message to embed in the request payload
 * @param includeSystemPrompt whether a system-prompt message is prepended
 * @return the response body parsed as a {@link JsonNode}
 */
private static JsonNode assistWithQuestion(String question, boolean includeSystemPrompt) {
  WebTarget client = Ivy.rest().client(OPEN_AI);
  Entity<JsonNode> request = buildPayloadFromQuestion(question, includeSystemPrompt);
  // fix: removed leftover debug statement Ivy.log().warn("here")
  JsonNode result = client.path("chat/completions").request()
      .post(request).readEntity(JsonNode.class);
  return result;
}

/**
 * Builds the JSON request entity for a chat completion: the shared
 * completion parameters plus a "messages" array holding an optional
 * system prompt followed by the user question.
 */
private static Entity<JsonNode> buildPayloadFromQuestion(String question, boolean includeSystemPrompt) {
  ArrayNode messages = JsonNodeFactory.instance.arrayNode();
  if (includeSystemPrompt) {
    messages.add(message("system", "SYSTEM_PROMT"));
  }
  messages.add(message("user", question));
  ObjectNode payload = completion().set("messages", messages);
  return Entity.entity(payload, MediaType.APPLICATION_JSON);
}

/** Creates a single chat message object with the given role and content. */
private static ObjectNode message(String role, String content) {
  ObjectNode node = JsonNodeFactory.instance.objectNode();
  node.put("role", role);
  node.put("content", content);
  return node;
}

/**
 * Returns the fixed completion parameters shared by every mocked request.
 * Field order matches the original (model, temperature, top_p,
 * frequency_penalty, presence_penalty, max_tokens) so the serialized
 * JSON is byte-identical.
 */
private static ObjectNode completion() {
  return JsonNodeFactory.instance.objectNode()
      .put("model", "gpt-3.5-turbo")
      .put("temperature", 1)
      .put("top_p", 1)
      .put("frequency_penalty", 0)
      .put("presence_penalty", 0)
      .put("max_tokens", 1024);
}

}

0 comments on commit d1f9c21

Please sign in to comment.