Skip to content

Commit

Permalink
Add profile to use ollama + bump quarkus and langchain4j versions
Browse files — browse the repository at this point in the history
  • Loading branch information
mariofusco committed Jan 2, 2025
1 parent ede48cd commit 839e162
Show file tree
Hide file tree
Showing 24 changed files with 383 additions and 102 deletions.
4 changes: 2 additions & 2 deletions docs/docs/step-07.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Step 06 - Function calling and Tools
# Step 07 - Function calling and Tools

The RAG pattern allows passing knowledge to the LLM based on your own data.
It's a very popular pattern, but not the only one that can be used.
Expand All @@ -22,7 +22,7 @@ The result is sent back to the LLM, which can use it to continue the conversatio
In this step, we are going to see how to implement function calling in our application.
We will set up a database and create a function that allows the LLM to retrieve data (bookings, customers...) from the database.

The final code is available in the `step-06` folder.
The final code is available in the `step-07` folder.
However, we recommend you follow the step-by-step guide to understand how it works, and the different steps to implement this pattern.

## A couple of new dependencies
Expand Down
44 changes: 36 additions & 8 deletions step-01/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@

<compiler-plugin.version>3.13.0</compiler-plugin.version>

<quarkus.platform.version>3.15.1</quarkus.platform.version>
<quarkus-langchain4j.version>0.18.0</quarkus-langchain4j.version>
<quarkus.platform.version>3.17.5</quarkus.platform.version>
<quarkus-langchain4j.version>0.22.0</quarkus-langchain4j.version>
</properties>

<dependencyManagement>
Expand All @@ -34,12 +34,6 @@

<dependencies>

<dependency>
<groupId>io.quarkiverse.langchain4j</groupId>
<artifactId>quarkus-langchain4j-openai</artifactId>
<version>${quarkus-langchain4j.version}</version>
</dependency>

<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest</artifactId>
Expand Down Expand Up @@ -106,4 +100,38 @@

</plugins>
</build>

<profiles>
<profile>
<id>openai</id>
<activation>
<activeByDefault>true</activeByDefault>
<property>
<name>openai</name>
</property>
</activation>
<dependencies>
<dependency>
<groupId>io.quarkiverse.langchain4j</groupId>
<artifactId>quarkus-langchain4j-openai</artifactId>
<version>${quarkus-langchain4j.version}</version>
</dependency>
</dependencies>
</profile>
<profile>
<id>ollama</id>
<activation>
<property>
<name>ollama</name>
</property>
</activation>
<dependencies>
<dependency>
<groupId>io.quarkiverse.langchain4j</groupId>
<artifactId>quarkus-langchain4j-ollama</artifactId>
<version>${quarkus-langchain4j.version}</version>
</dependency>
</dependencies>
</profile>
</profiles>
</project>
11 changes: 8 additions & 3 deletions step-01/src/main/resources/application.properties
Original file line number Diff line number Diff line change
@@ -1,5 +1,10 @@
quarkus.langchain4j.openai.api-key=${OPENAI_API_KEY}
quarkus.langchain4j.log-requests=true
quarkus.langchain4j.log-responses=true

# OpenAI
quarkus.langchain4j.openai.api-key=${OPENAI_API_KEY}
quarkus.langchain4j.openai.chat-model.model-name=gpt-4o
quarkus.langchain4j.openai.chat-model.log-requests=true
quarkus.langchain4j.openai.chat-model.log-responses=true

# Ollama
quarkus.langchain4j.ollama.chat-model.model-id=llama3.2
quarkus.langchain4j.ollama.timeout=180s
44 changes: 36 additions & 8 deletions step-02/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@

<compiler-plugin.version>3.13.0</compiler-plugin.version>

<quarkus.platform.version>3.15.1</quarkus.platform.version>
<quarkus-langchain4j.version>0.18.0</quarkus-langchain4j.version>
<quarkus.platform.version>3.17.5</quarkus.platform.version>
<quarkus-langchain4j.version>0.22.0</quarkus-langchain4j.version>
</properties>

<dependencyManagement>
Expand All @@ -35,12 +35,6 @@

<dependencies>

<dependency>
<groupId>io.quarkiverse.langchain4j</groupId>
<artifactId>quarkus-langchain4j-openai</artifactId>
<version>${quarkus-langchain4j.version}</version>
</dependency>

<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest</artifactId>
Expand Down Expand Up @@ -109,4 +103,38 @@

</plugins>
</build>

<profiles>
<profile>
<id>openai</id>
<activation>
<activeByDefault>true</activeByDefault>
<property>
<name>openai</name>
</property>
</activation>
<dependencies>
<dependency>
<groupId>io.quarkiverse.langchain4j</groupId>
<artifactId>quarkus-langchain4j-openai</artifactId>
<version>${quarkus-langchain4j.version}</version>
</dependency>
</dependencies>
</profile>
<profile>
<id>ollama</id>
<activation>
<property>
<name>ollama</name>
</property>
</activation>
<dependencies>
<dependency>
<groupId>io.quarkiverse.langchain4j</groupId>
<artifactId>quarkus-langchain4j-ollama</artifactId>
<version>${quarkus-langchain4j.version}</version>
</dependency>
</dependencies>
</profile>
</profiles>
</project>
12 changes: 9 additions & 3 deletions step-02/src/main/resources/application.properties
Original file line number Diff line number Diff line change
@@ -1,9 +1,15 @@
quarkus.langchain4j.openai.api-key=${OPENAI_API_KEY}
quarkus.langchain4j.log-requests=true
quarkus.langchain4j.log-responses=true

# OpenAI
quarkus.langchain4j.openai.api-key=${OPENAI_API_KEY}
quarkus.langchain4j.openai.chat-model.model-name=gpt-4o
quarkus.langchain4j.openai.chat-model.log-requests=true
quarkus.langchain4j.openai.chat-model.log-responses=true

quarkus.langchain4j.openai.chat-model.temperature=1.0
quarkus.langchain4j.openai.chat-model.max-tokens=1000
quarkus.langchain4j.openai.chat-model.frequency-penalty=0

# Ollama
quarkus.langchain4j.ollama.chat-model.model-id=llama3.2
quarkus.langchain4j.ollama.timeout=180s
quarkus.langchain4j.ollama.chat-model.temperature=1.0
45 changes: 36 additions & 9 deletions step-03/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@

<compiler-plugin.version>3.13.0</compiler-plugin.version>

<quarkus.platform.version>3.15.1</quarkus.platform.version>
<quarkus-langchain4j.version>0.18.0</quarkus-langchain4j.version>
<quarkus.platform.version>3.17.5</quarkus.platform.version>
<quarkus-langchain4j.version>0.22.0</quarkus-langchain4j.version>
</properties>

<dependencyManagement>
Expand All @@ -34,12 +34,6 @@

<dependencies>

<dependency>
<groupId>io.quarkiverse.langchain4j</groupId>
<artifactId>quarkus-langchain4j-openai</artifactId>
<version>${quarkus-langchain4j.version}</version>
</dependency>

<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest</artifactId>
Expand Down Expand Up @@ -104,7 +98,40 @@
</execution>
</executions>
</plugin>

</plugins>
</build>

<profiles>
<profile>
<id>openai</id>
<activation>
<activeByDefault>true</activeByDefault>
<property>
<name>openai</name>
</property>
</activation>
<dependencies>
<dependency>
<groupId>io.quarkiverse.langchain4j</groupId>
<artifactId>quarkus-langchain4j-openai</artifactId>
<version>${quarkus-langchain4j.version}</version>
</dependency>
</dependencies>
</profile>
<profile>
<id>ollama</id>
<activation>
<property>
<name>ollama</name>
</property>
</activation>
<dependencies>
<dependency>
<groupId>io.quarkiverse.langchain4j</groupId>
<artifactId>quarkus-langchain4j-ollama</artifactId>
<version>${quarkus-langchain4j.version}</version>
</dependency>
</dependencies>
</profile>
</profiles>
</project>
12 changes: 8 additions & 4 deletions step-03/src/main/resources/application.properties
Original file line number Diff line number Diff line change
@@ -1,11 +1,15 @@
quarkus.langchain4j.openai.api-key=${OPENAI_API_KEY}
quarkus.langchain4j.log-requests=true
quarkus.langchain4j.log-responses=true

# OpenAI
quarkus.langchain4j.openai.api-key=${OPENAI_API_KEY}
quarkus.langchain4j.openai.chat-model.model-name=gpt-4o
quarkus.langchain4j.openai.chat-model.log-requests=true
quarkus.langchain4j.openai.chat-model.log-responses=true

quarkus.langchain4j.openai.chat-model.temperature=1.0
quarkus.langchain4j.openai.chat-model.max-tokens=1000
quarkus.langchain4j.openai.chat-model.frequency-penalty=0


# Ollama
quarkus.langchain4j.ollama.chat-model.model-id=llama3.2
quarkus.langchain4j.ollama.timeout=180s
quarkus.langchain4j.ollama.chat-model.temperature=1.0
44 changes: 36 additions & 8 deletions step-04/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@

<compiler-plugin.version>3.13.0</compiler-plugin.version>

<quarkus.platform.version>3.15.1</quarkus.platform.version>
<quarkus-langchain4j.version>0.18.0</quarkus-langchain4j.version>
<quarkus.platform.version>3.17.5</quarkus.platform.version>
<quarkus-langchain4j.version>0.22.0</quarkus-langchain4j.version>
</properties>

<dependencyManagement>
Expand All @@ -34,12 +34,6 @@

<dependencies>

<dependency>
<groupId>io.quarkiverse.langchain4j</groupId>
<artifactId>quarkus-langchain4j-openai</artifactId>
<version>${quarkus-langchain4j.version}</version>
</dependency>

<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest</artifactId>
Expand Down Expand Up @@ -107,4 +101,38 @@

</plugins>
</build>

<profiles>
<profile>
<id>openai</id>
<activation>
<activeByDefault>true</activeByDefault>
<property>
<name>openai</name>
</property>
</activation>
<dependencies>
<dependency>
<groupId>io.quarkiverse.langchain4j</groupId>
<artifactId>quarkus-langchain4j-openai</artifactId>
<version>${quarkus-langchain4j.version}</version>
</dependency>
</dependencies>
</profile>
<profile>
<id>ollama</id>
<activation>
<property>
<name>ollama</name>
</property>
</activation>
<dependencies>
<dependency>
<groupId>io.quarkiverse.langchain4j</groupId>
<artifactId>quarkus-langchain4j-ollama</artifactId>
<version>${quarkus-langchain4j.version}</version>
</dependency>
</dependencies>
</profile>
</profiles>
</project>
12 changes: 9 additions & 3 deletions step-04/src/main/resources/application.properties
Original file line number Diff line number Diff line change
@@ -1,9 +1,15 @@
quarkus.langchain4j.openai.api-key=${OPENAI_API_KEY}
quarkus.langchain4j.log-requests=true
quarkus.langchain4j.log-responses=true

# OpenAI
quarkus.langchain4j.openai.api-key=${OPENAI_API_KEY}
quarkus.langchain4j.openai.chat-model.model-name=gpt-4o
quarkus.langchain4j.openai.chat-model.log-requests=true
quarkus.langchain4j.openai.chat-model.log-responses=true

quarkus.langchain4j.openai.chat-model.temperature=1.0
quarkus.langchain4j.openai.chat-model.max-tokens=1000
quarkus.langchain4j.openai.chat-model.frequency-penalty=0

# Ollama
quarkus.langchain4j.ollama.chat-model.model-id=llama3.2
quarkus.langchain4j.ollama.timeout=180s
quarkus.langchain4j.ollama.chat-model.temperature=1.0
Loading… (remaining changed files truncated in this capture)

0 comments on commit 839e162

Please sign in to comment.