diff --git a/.github/workflows/causal_lm_cpp.yml b/.github/workflows/causal_lm_cpp.yml index fb0c9c4b0b..b6abbefac0 100644 --- a/.github/workflows/causal_lm_cpp.yml +++ b/.github/workflows/causal_lm_cpp.yml @@ -53,17 +53,17 @@ jobs: wget https://huggingface.co/smangrul/tinyllama_lora_sql/resolve/main/adapter_model.safetensors?download=true -O adapter_model.safetensors - run: > . ./ov/setupvars.sh - && timeout 25s ./build/samples/cpp/multinomial_causal_lm/multinomial_causal_lm ./open_llama_3b_v2/ a + && timeout 35s ./build/samples/cpp/multinomial_causal_lm/multinomial_causal_lm ./open_llama_3b_v2/ a env: PYTHONPATH: "./build" - run: > . ./ov/setupvars.sh - && timeout 25s ./samples/python/multinomial_causal_lm/multinomial_causal_lm.py ./open_llama_3b_v2/ b + && timeout 35s ./samples/python/multinomial_causal_lm/multinomial_causal_lm.py ./open_llama_3b_v2/ b env: PYTHONPATH: "./build" - run: > . ./ov/setupvars.sh - && timeout 25s ./build/samples/cpp/text_generation/greedy_causal_lm ./open_llama_3b_v2/ "return 0" + && timeout 35s ./build/samples/cpp/text_generation/greedy_causal_lm ./open_llama_3b_v2/ "return 0" - | diff <(timeout 25s samples/python/text_generation/greedy_causal_lm.py ./open_llama_3b_v2/ "return 0") - + | diff <(timeout 35s samples/python/text_generation/greedy_causal_lm.py ./open_llama_3b_v2/ "return 0") - env: PYTHONPATH: "./build" diff --git a/.github/workflows/genai-tools.yml b/.github/workflows/genai-tools.yml index 2ded72510d..61658dcbda 100644 --- a/.github/workflows/genai-tools.yml +++ b/.github/workflows/genai-tools.yml @@ -44,7 +44,7 @@ jobs: with: platform: ubuntu22 commit_packages_to_provide: wheels - revision: 345163f87953fb0dd8dd590257eb7fc84378da8e + revision: latest_available_commit llm_bench: name: 'LLM bench tests' @@ -128,6 +128,7 @@ jobs: optimum-cli export openvino --trust-remote-code --model openai/whisper-tiny ./ov_models/whisper-tiny python ./tools/llm_bench/benchmark.py -m ./ov_models/whisper-tiny --media multilingual_librispeech/data/mls_polish/train/audio/3283_1447_000/3283_1447_000000.flac -d cpu -n 1 --optimum - name: Test 
openai/whisper-tiny via GenAI + run: | python ./tools/llm_bench/benchmark.py -m ./ov_models/whisper-tiny --media multilingual_librispeech/data/mls_polish/train/audio/3283_1447_000/3283_1447_000000.flac -d cpu -n 1 rm -rf ./ov_models/whisper-tiny rm -rf multilingual_librispeech @@ -136,6 +137,7 @@ jobs: optimum-cli export openvino --model katuni4ka/tiny-random-llava-next ./ov_models/tiny-random-llava-next --task image-text-to-text --trust-remote-code python ./tools/llm_bench/benchmark.py -m ./ov_models/tiny-random-llava-next --media https://github.com/openvinotoolkit/openvino_notebooks/assets/29454499/d5fbbd1a-d484-415c-88cb-9986625b7b11 --prompt "What is unusual on this image?" -ic 20 --optimum - name: Test katuni4ka/tiny-random-llava-next via GenAI + run: | python ./tools/llm_bench/benchmark.py -m ./ov_models/tiny-random-llava-next --media https://github.com/openvinotoolkit/openvino_notebooks/assets/29454499/d5fbbd1a-d484-415c-88cb-9986625b7b11 --prompt "What is unusual on this image?" -ic 20 rm -rf ./ov_models diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 0d7a5b7bae..0a991e2a54 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -52,7 +52,7 @@ jobs: with: platform: ubuntu22 commit_packages_to_provide: wheels - revision: 345163f87953fb0dd8dd590257eb7fc84378da8e + revision: latest_available_commit - name: Clone docker tag from OpenVINO repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 062b83fc27..7cb0ff98d3 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -17,7 +17,7 @@ concurrency: env: PYTHON_VERSION: '3.10' - OV_BRANCH: 345163f87953fb0dd8dd590257eb7fc84378da8e + OV_BRANCH: 'master' OV_TARBALL: '' jobs: diff --git a/.github/workflows/stable_diffusion_1_5_cpp.yml b/.github/workflows/stable_diffusion_1_5_cpp.yml index 3b01697f26..e0bf5371b3 100644 --- 
a/.github/workflows/stable_diffusion_1_5_cpp.yml +++ b/.github/workflows/stable_diffusion_1_5_cpp.yml @@ -45,7 +45,7 @@ jobs: with: platform: ubuntu22 commit_packages_to_provide: wheels - revision: 345163f87953fb0dd8dd590257eb7fc84378da8e + revision: latest_available_commit openvino_download_windows: name: Download OpenVINO for Windows @@ -71,7 +71,7 @@ jobs: with: platform: windows commit_packages_to_provide: wheels - revision: 345163f87953fb0dd8dd590257eb7fc84378da8e + revision: latest_available_commit stable_diffusion_1_5_cpp-linux: runs-on: ubuntu-22.04-8-cores diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 8f43af44ae..e65972110b 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -17,7 +17,7 @@ concurrency: env: PYTHON_VERSION: '3.11' - OV_BRANCH: 345163f87953fb0dd8dd590257eb7fc84378da8e + OV_BRANCH: 'master' OV_TARBALL: '' jobs: diff --git a/README.md b/README.md index c5cf799973..cea1e358bc 100644 --- a/README.md +++ b/README.md @@ -73,9 +73,9 @@ optimum-cli export openvino --model "TinyLlama/TinyLlama-1.1B-Chat-v1.0" --weigh ### Run generation using LLMPipeline API in Python ```python -import openvino_genai as ov_genai +import openvino_genai #Will run model on CPU, GPU or NPU are possible options -pipe = ov_genai.LLMPipeline("./TinyLlama-1.1B-Chat-v1.0/", "CPU") +pipe = openvino_genai.LLMPipeline("./TinyLlama-1.1B-Chat-v1.0/", "CPU") print(pipe.generate("The Sun is yellow because", max_new_tokens=100)) ``` @@ -128,11 +128,11 @@ curl -O "https://storage.openvinotoolkit.org/test_data/images/dog.jpg" ```python import numpy as np import openvino as ov -import openvino_genai as ov_genai +import openvino_genai from PIL import Image # Choose GPU instead of CPU in the line below to run the model on Intel integrated or discrete GPU -pipe = ov_genai.VLMPipeline("./InternVL2-1B", "CPU") +pipe = openvino_genai.VLMPipeline("./InternVL2-1B", "CPU") pipe.start_chat() image = Image.open("dog.jpg")