Skip to content

Commit

Permalink
Polish
Browse files Browse the repository at this point in the history
  • Loading branch information
anunnakian committed May 1, 2024
1 parent 8dd12fd commit 0a0ac22
Show file tree
Hide file tree
Showing 7 changed files with 10 additions and 33 deletions.
2 changes: 1 addition & 1 deletion docs/docs/integrations/language-models/open-ai.md
Original file line number Diff line number Diff line change
Expand Up @@ -71,9 +71,9 @@ OpenAiChatModel model = OpenAiChatModel.builder()
.proxy(...)
.logRequests(...)
.logResponses(...)
.logStreamingResponses(...)
.tokenizer(...)
.customHeaders(...)
.isStreaming(...)
.build();
```
See the description of some of the parameters above [here](https://platform.openai.com/docs/api-reference/chat/create).
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@ class OllamaOpenAiStreamingChatModelIT extends AbstractOllamaLanguageModelInfras
.temperature(0.0)
.logRequests(true)
.logResponses(true)
.isStreaming(true)
.build();

@Test
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,8 +74,7 @@ public OpenAiChatModel(String baseUrl,
Boolean logRequests,
Boolean logResponses,
Tokenizer tokenizer,
Map<String, String> customHeaders,
boolean isStreaming) {
Map<String, String> customHeaders) {

baseUrl = getOrDefault(baseUrl, OPENAI_URL);
if (OPENAI_DEMO_API_KEY.equals(apiKey)) {
Expand Down Expand Up @@ -178,11 +177,7 @@ public int estimateTokenCount(List<ChatMessage> messages) {
}

public static OpenAiChatModel withApiKey(String apiKey) {
return withApiKey(apiKey, false);
}

public static OpenAiChatModel withApiKey(String apiKey, boolean isStreaming) {
return builder().apiKey(apiKey).isStreaming(isStreaming).build();
return builder().apiKey(apiKey).build();
}

public static OpenAiChatModelBuilder builder() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
* You can find description of parameters <a href="https://platform.openai.com/docs/api-reference/chat/create">here</a>.
*
*
* @Deprecated use {@link OpenAiStreamingChatModel} instead.
* @Deprecated use {@link OpenAiChatModel} instead.
*/
@Deprecated()
public class OpenAiStreamingChatModel implements StreamingChatLanguageModel, TokenCountEstimator {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,25 +35,23 @@

class OpenAiStreamingChatModelIT {

OpenAiChatModel model = OpenAiChatModel.builder()
OpenAiStreamingChatModel model = OpenAiStreamingChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.temperature(0.0)
.logRequests(true)
.logResponses(true)
.isStreaming(true)
.build();

StreamingChatLanguageModel visionModel = OpenAiChatModel.builder()
StreamingChatLanguageModel visionModel = OpenAiStreamingChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.modelName(GPT_4_VISION_PREVIEW)
.temperature(0.0)
.logRequests(true)
.logResponses(true)
.isStreaming(true)
.build();

ToolSpecification calculator = ToolSpecification.builder()
Expand Down Expand Up @@ -303,15 +301,14 @@ public void onError(Throwable error) {
void should_execute_multiple_tools_in_parallel_then_stream_answer() throws Exception {

// given
StreamingChatLanguageModel model = OpenAiChatModel.builder()
StreamingChatLanguageModel model = OpenAiStreamingChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.modelName(GPT_3_5_TURBO_1106) // supports parallel function calling
.temperature(0.0)
.logRequests(true)
.logResponses(true)
.isStreaming(true)
.build();

UserMessage userMessage = userMessage("2+2=? 3+3=?");
Expand Down Expand Up @@ -415,15 +412,14 @@ void should_stream_valid_json() throws Exception {
String userMessage = "Return JSON with two fields: name and surname of Klaus Heisler. " +
"Before returning, tell me a joke."; // nudging it to say something additionally to json

StreamingChatLanguageModel model = OpenAiChatModel.builder()
StreamingChatLanguageModel model = OpenAiStreamingChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.modelName(GPT_3_5_TURBO_1106) // supports response_format = 'json_object'
.responseFormat("json_object")
.logRequests(true)
.logResponses(true)
.isStreaming(true)
.build();

// when
Expand Down Expand Up @@ -569,14 +565,13 @@ void should_accept_text_and_multiple_images_from_different_sources() {
void should_use_enum_as_model_name() {

// given
OpenAiChatModel model = OpenAiChatModel.builder()
OpenAiStreamingChatModel model = OpenAiStreamingChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.modelName(GPT_3_5_TURBO)
.logRequests(true)
.logResponses(true)
.isStreaming(true)
.build();

String question = "What is the capital of Germany?";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,17 +39,8 @@ static Stream<StreamingChatLanguageModel> models() {
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.logRequests(true)
.logResponses(true)
.isStreaming(true)
.build(),
AzureOpenAiStreamingChatModel
.builder()
AzureOpenAiStreamingChatModel.builder()
.endpoint(System.getenv("AZURE_OPENAI_ENDPOINT"))
.apiKey(System.getenv("AZURE_OPENAI_KEY"))
.logRequestsAndResponses(true)
Expand Down Expand Up @@ -254,7 +245,6 @@ void should_execute_multiple_tools_sequentially_then_answer() throws Exception {
.modelName(GPT_3_5_TURBO_0613) // this model can only call tools sequentially
.temperature(0.0)
.logRequests(true)
.isStreaming(true)
.logResponses(true)
.build();

Expand Down Expand Up @@ -354,7 +344,6 @@ void should_execute_multiple_tools_in_parallel_then_answer() throws Exception {
.temperature(0.0)
.logRequests(true)
.logResponses(true)
.isStreaming(true)
.build();

ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,6 @@ static Stream<StreamingChatLanguageModel> models() {
.temperature(0.0)
.logRequests(true)
.logResponses(true)
.isStreaming(true)
.build(),
MistralAiStreamingChatModel.builder()
.apiKey(System.getenv("MISTRAL_AI_API_KEY"))
Expand Down

0 comments on commit 0a0ac22

Please sign in to comment.