Skip to content

Commit

Permalink
Python: Upgrade Minimum Onnx Version to enable MacOS Unit Tests (#9981)
Browse files Browse the repository at this point in the history
Closes : #9979

Enabled test cases for macOS
Upgraded the uv.lock file

### Motivation and Context

Version 0.4.0 did not provide a pip package for macOS, which
forced us to disable the unit tests on macOS.
With version 0.5.0 available, we can enable the unit tests for macOS.

Using version 0.5.0 will enable the following features for users:

1. Phi3.5 and Phi3.5 MoE
2. MacOS Support without Building the Code from Source
3. LoRA Adapter Swapping

### Contribution Checklist

<!-- Before submitting this PR, please make sure: -->

- [x] The code builds clean without any errors or warnings
- [x] The PR follows the [SK Contribution
Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md)
and the [pre-submission formatting
script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts)
raises no violations
- [x] All unit tests pass, and I have added new tests where possible
- [x] I didn't break anyone 😄

---------

Co-authored-by: Eduard van Valkenburg <[email protected]>
  • Loading branch information
nmoeller and eavanvalkenburg authored Dec 19, 2024
1 parent 3bab848 commit 8b389b8
Show file tree
Hide file tree
Showing 10 changed files with 421 additions and 407 deletions.
5 changes: 3 additions & 2 deletions python/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ ollama = [
"ollama ~= 0.4"
]
onnx = [
"onnxruntime-genai ~= 0.4; platform_system != 'Darwin'"
"onnxruntime-genai ~= 0.5"
]
anthropic = [
"anthropic ~= 0.32"
Expand Down Expand Up @@ -156,7 +156,8 @@ filterwarnings = [
]
timeout = 120
markers = [
"ollama: mark a test as requiring the Ollama service (use \"not ollama\" to skip those tests)"
"ollama: mark a test as requiring the Ollama service (use \"not ollama\" to skip those tests)",
"onnx: mark a test as requiring the Onnx service (use \"not onnx\" to skip those tests)"
]

[tool.ruff]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@


import os
import platform
import sys
from typing import Annotated

Expand All @@ -22,6 +21,7 @@
from semantic_kernel.connectors.ai.google.vertex_ai import VertexAIChatCompletion, VertexAIChatPromptExecutionSettings
from semantic_kernel.connectors.ai.mistral_ai import MistralAIChatCompletion, MistralAIChatPromptExecutionSettings
from semantic_kernel.connectors.ai.ollama import OllamaChatCompletion, OllamaChatPromptExecutionSettings
from semantic_kernel.connectors.ai.onnx import OnnxGenAIChatCompletion, OnnxGenAIPromptExecutionSettings, ONNXTemplate
from semantic_kernel.connectors.ai.open_ai import (
AzureChatCompletion,
AzureChatPromptExecutionSettings,
Expand Down Expand Up @@ -71,12 +71,6 @@
bedrock_setup: bool = is_service_setup_for_testing(["AWS_DEFAULT_REGION"], raise_if_not_set=False)


skip_on_mac_available = platform.system() == "Darwin"
if not skip_on_mac_available:
from semantic_kernel.connectors.ai.onnx import OnnxGenAIChatCompletion, OnnxGenAIPromptExecutionSettings
from semantic_kernel.connectors.ai.onnx.utils import ONNXTemplate


# A mock plugin that contains a function that returns a complex object.
class PersonDetails(KernelBaseModel):
id: str
Expand Down Expand Up @@ -155,7 +149,7 @@ def services(self) -> dict[str, tuple[ServiceType | None, type[PromptExecutionSe
"vertex_ai": (VertexAIChatCompletion() if vertex_ai_setup else None, VertexAIChatPromptExecutionSettings),
"onnx_gen_ai": (
OnnxGenAIChatCompletion(template=ONNXTemplate.PHI3V) if onnx_setup else None,
OnnxGenAIPromptExecutionSettings if not skip_on_mac_available else None,
OnnxGenAIPromptExecutionSettings,
),
"bedrock_amazon_titan": (
BedrockChatCompletion(model_id="amazon.titan-text-premier-v1:0") if bedrock_setup else None,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,10 @@
ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]),
],
{},
marks=pytest.mark.skipif(not onnx_setup, reason="Need a Onnx Model setup"),
marks=(
pytest.mark.skipif(not onnx_setup, reason="Need a Onnx Model setup"),
pytest.mark.onnx,
),
id="onnx_gen_ai_image_input_file",
),
pytest.param(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,10 @@ class Reasoning(KernelBaseModel):
ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]),
],
{},
marks=pytest.mark.skipif(not onnx_setup, reason="Need a Onnx Model setup"),
marks=(
pytest.mark.skipif(not onnx_setup, reason="Need a Onnx Model setup"),
pytest.mark.onnx,
),
id="onnx_gen_ai",
),
# endregion
Expand Down
14 changes: 6 additions & 8 deletions python/tests/integration/completions/test_text_completion.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
# Copyright (c) Microsoft. All rights reserved.

import platform
import sys
from functools import partial
from typing import Any
Expand All @@ -19,6 +18,7 @@
from semantic_kernel.connectors.ai.google.vertex_ai import VertexAITextCompletion, VertexAITextPromptExecutionSettings
from semantic_kernel.connectors.ai.hugging_face import HuggingFacePromptExecutionSettings, HuggingFaceTextCompletion
from semantic_kernel.connectors.ai.ollama import OllamaTextCompletion, OllamaTextPromptExecutionSettings
from semantic_kernel.connectors.ai.onnx import OnnxGenAIPromptExecutionSettings, OnnxGenAITextCompletion
from semantic_kernel.connectors.ai.open_ai import (
AzureOpenAISettings,
AzureTextCompletion,
Expand All @@ -43,11 +43,6 @@
) # Tests are optional for ONNX
bedrock_setup = is_service_setup_for_testing(["AWS_DEFAULT_REGION"], raise_if_not_set=False)

skip_on_mac_available = platform.system() == "Darwin"
if not skip_on_mac_available:
from semantic_kernel.connectors.ai.onnx import OnnxGenAIPromptExecutionSettings, OnnxGenAITextCompletion


pytestmark = pytest.mark.parametrize(
"service_id, execution_settings_kwargs, inputs, kwargs",
[
Expand Down Expand Up @@ -128,7 +123,10 @@
{},
["<|user|>Repeat the word Hello<|end|><|assistant|>"],
{},
marks=pytest.mark.skipif(not onnx_setup, reason="Need local Onnx setup"),
marks=(
pytest.mark.skipif(not onnx_setup, reason="Need a Onnx Model setup"),
pytest.mark.onnx,
),
id="onnx_gen_ai_text_completion",
),
pytest.param(
Expand Down Expand Up @@ -242,7 +240,7 @@ def services(self) -> dict[str, tuple[ServiceType | None, type[PromptExecutionSe
),
"onnx_gen_ai": (
OnnxGenAITextCompletion() if onnx_setup else None,
OnnxGenAIPromptExecutionSettings if not skip_on_mac_available else None,
OnnxGenAIPromptExecutionSettings,
),
# Amazon Bedrock supports models from multiple providers but requests to and responses from the models are
# inconsistent. So we need to test each model separately.
Expand Down
Loading

0 comments on commit 8b389b8

Please sign in to comment.