diff --git a/.github/workflows/python-test-coverage.yml b/.github/workflows/python-test-coverage.yml
index d2aebc3796c6..59bdb8f4aaba 100644
--- a/.github/workflows/python-test-coverage.yml
+++ b/.github/workflows/python-test-coverage.yml
@@ -1,55 +1,53 @@
 name: Python Test Coverage
 on:
-  pull_request_target:
+  pull_request:
     branches: ["main", "feature*"]
     paths:
-      - "python/**"
-  workflow_run:
-    workflows: ["Python Unit Tests"]
-    types:
-      - in_progress
+      - "python/semantic_kernel/**"
+      - "python/tests/unit/**"
+env:
+  # Configure a constant location for the uv cache
+  UV_CACHE_DIR: /tmp/.uv-cache
+
+permissions:
+  contents: write
+  checks: write
+  pull-requests: write
 
 jobs:
   python-tests-coverage:
     runs-on: ubuntu-latest
-    continue-on-error: true
-    permissions:
-      pull-requests: write
-      contents: read
-      actions: read
+    continue-on-error: false
+    defaults:
+      run:
+        working-directory: python
+    env:
+      UV_PYTHON: "3.10"
     steps:
-      - name: Wait for unit tests to succeed
-        uses: lewagon/wait-on-check-action@v1.3.4
-        with:
-          ref: ${{ github.event.pull_request.head.sha }}
-          check-name: 'Python Test Coverage'
-          repo-token: ${{ secrets.GH_ACTIONS_PR_WRITE }}
-          wait-interval: 90
-          allowed-conclusions: success
       - uses: actions/checkout@v4
       - name: Setup filename variables
         run: echo "FILE_ID=${{ github.event.number }}" >> $GITHUB_ENV
-      - name: Download Files
-        uses: actions/download-artifact@v4
+      - name: Set up uv
+        uses: astral-sh/setup-uv@v4
         with:
-          github-token: ${{ secrets.GH_ACTIONS_PR_WRITE }}
-          run-id: ${{ github.event.workflow_run.id }}
-          path: python/
-          merge-multiple: true
-      - name: Display structure of downloaded files
-        run: ls python/
+          version: "0.5.x"
+          enable-cache: true
+          cache-suffix: ${{ runner.os }}-${{ env.UV_PYTHON }}
+      - name: Install the project
+        run: uv sync --all-extras --dev
+      - name: Test with pytest
+        run: uv run --frozen pytest -q --junitxml=pytest.xml --cov=semantic_kernel --cov-report=term-missing:skip-covered ./tests/unit | tee python-coverage.txt
       - name: Pytest coverage comment
         id: coverageComment
         uses: MishaKav/pytest-coverage-comment@main
-        continue-on-error: true
+        continue-on-error: false
        with:
-          github-token: ${{ secrets.GH_ACTIONS_PR_WRITE }}
-          pytest-coverage-path: python-coverage.txt
+          pytest-coverage-path: python/python-coverage.txt
           coverage-path-prefix: "python/"
           title: "Python Test Coverage Report"
           badge-title: "Python Test Coverage"
           junitxml-title: "Python Unit Test Overview"
-          junitxml-path: pytest.xml
+          junitxml-path: python/pytest.xml
           default-branch: "main"
-          unique-id-for-comment: python-test-coverage
+          report-only-changed-files: true
diff --git a/.github/workflows/python-unit-tests.yml b/.github/workflows/python-unit-tests.yml
index aec1937984f5..745572c274b0 100644
--- a/.github/workflows/python-unit-tests.yml
+++ b/.github/workflows/python-unit-tests.yml
@@ -62,44 +62,3 @@ jobs:
         display-options: fEX
         fail-on-empty: true
         title: Test results
-  python-test-coverage:
-    name: Python Test Coverage
-    runs-on: [ubuntu-latest]
-    continue-on-error: true
-    permissions:
-      contents: write
-    defaults:
-      run:
-        working-directory: python
-    env:
-      UV_PYTHON: "3.10"
-    steps:
-      - uses: actions/checkout@v4
-      - name: Setup filename variables
-        run: echo "FILE_ID=${{ github.event.number }}" >> $GITHUB_ENV
-      - name: Set up uv
-        uses: astral-sh/setup-uv@v4
-        with:
-          version: "0.5.x"
-          enable-cache: true
-          cache-suffix: ${{ runner.os }}-${{ env.UV_PYTHON }}
-      - name: Install the project
-        run: uv sync --all-extras --dev
-      - name: Test with pytest
-        run: uv run --frozen pytest -q --junitxml=pytest.xml --cov=semantic_kernel --cov-report=term-missing:skip-covered ./tests/unit | tee python-coverage.txt
-      - name: Upload coverage
-        if: always()
-        uses: actions/upload-artifact@v4
-        with:
-          name: python-coverage-${{ env.FILE_ID }}.txt
-          path: python/python-coverage.txt
-          overwrite: true
-          retention-days: 1
-      - name: Upload pytest.xml
-        if: always()
-        uses: actions/upload-artifact@v4
-        with:
-          name: pytest-${{ env.FILE_ID }}.xml
-          path: python/pytest.xml
-          overwrite: true
-          retention-days: 1
diff --git a/python/semantic_kernel/connectors/ai/anthropic/prompt_execution_settings/anthropic_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/anthropic/prompt_execution_settings/anthropic_prompt_execution_settings.py
index 5c80506e3297..8541fd0dc651 100644
--- a/python/semantic_kernel/connectors/ai/anthropic/prompt_execution_settings/anthropic_prompt_execution_settings.py
+++ b/python/semantic_kernel/connectors/ai/anthropic/prompt_execution_settings/anthropic_prompt_execution_settings.py
@@ -32,7 +32,6 @@ class AnthropicChatPromptExecutionSettings(AnthropicPromptExecutionSettings):
     tools: Annotated[
         list[dict[str, Any]] | None,
         Field(
-            max_length=64,
             description=(
                 "Do not set this manually. It is set by the service based on the function choice configuration."
             ),
diff --git a/python/semantic_kernel/connectors/ai/azure_ai_inference/__init__.py b/python/semantic_kernel/connectors/ai/azure_ai_inference/__init__.py
index e6ba7c02f6c3..26c4e954addb 100644
--- a/python/semantic_kernel/connectors/ai/azure_ai_inference/__init__.py
+++ b/python/semantic_kernel/connectors/ai/azure_ai_inference/__init__.py
@@ -3,6 +3,7 @@
 from semantic_kernel.connectors.ai.azure_ai_inference.azure_ai_inference_prompt_execution_settings import (
     AzureAIInferenceChatPromptExecutionSettings,
     AzureAIInferenceEmbeddingPromptExecutionSettings,
+    AzureAIInferencePromptExecutionSettings,
 )
 from semantic_kernel.connectors.ai.azure_ai_inference.azure_ai_inference_settings import AzureAIInferenceSettings
 from semantic_kernel.connectors.ai.azure_ai_inference.services.azure_ai_inference_chat_completion import (
@@ -16,6 +17,7 @@
     "AzureAIInferenceChatCompletion",
     "AzureAIInferenceChatPromptExecutionSettings",
     "AzureAIInferenceEmbeddingPromptExecutionSettings",
+    "AzureAIInferencePromptExecutionSettings",
     "AzureAIInferenceSettings",
     "AzureAIInferenceTextEmbedding",
 ]
diff --git a/python/semantic_kernel/connectors/ai/azure_ai_inference/azure_ai_inference_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/azure_ai_inference/azure_ai_inference_prompt_execution_settings.py
index a8be8303e6b3..ac290925b399 100644
--- a/python/semantic_kernel/connectors/ai/azure_ai_inference/azure_ai_inference_prompt_execution_settings.py
+++ b/python/semantic_kernel/connectors/ai/azure_ai_inference/azure_ai_inference_prompt_execution_settings.py
@@ -33,7 +33,6 @@ class AzureAIInferenceChatPromptExecutionSettings(AzureAIInferencePromptExecutio
     tools: Annotated[
         list[dict[str, Any]] | None,
         Field(
-            max_length=64,
             description="Do not set this manually. It is set by the service based "
             "on the function choice configuration.",
         ),
diff --git a/python/semantic_kernel/connectors/ai/bedrock/bedrock_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/bedrock/bedrock_prompt_execution_settings.py
index ca33d3123490..0ee4377f350b 100644
--- a/python/semantic_kernel/connectors/ai/bedrock/bedrock_prompt_execution_settings.py
+++ b/python/semantic_kernel/connectors/ai/bedrock/bedrock_prompt_execution_settings.py
@@ -24,7 +24,6 @@ class BedrockChatPromptExecutionSettings(BedrockPromptExecutionSettings):
     tools: Annotated[
         list[dict[str, Any]] | None,
         Field(
-            max_length=64,
             description="Do not set this manually. It is set by the service based "
             "on the function choice configuration.",
         ),
diff --git a/python/semantic_kernel/connectors/ai/google/google_ai/google_ai_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/google/google_ai/google_ai_prompt_execution_settings.py
index 99e7ad1f8d56..fe5ace16df80 100644
--- a/python/semantic_kernel/connectors/ai/google/google_ai/google_ai_prompt_execution_settings.py
+++ b/python/semantic_kernel/connectors/ai/google/google_ai/google_ai_prompt_execution_settings.py
@@ -38,7 +38,6 @@ class GoogleAIChatPromptExecutionSettings(GoogleAIPromptExecutionSettings):
     tools: Annotated[
         list[dict[str, Any]] | None,
         Field(
-            max_length=64,
             description="Do not set this manually. It is set by the service based "
             "on the function choice configuration.",
         ),
diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_prompt_execution_settings.py
index 29b9e13e1278..21497d197634 100644
--- a/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_prompt_execution_settings.py
+++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_prompt_execution_settings.py
@@ -7,6 +7,7 @@
     from typing import override  # pragma: no cover
 else:
     from typing_extensions import override  # pragma: no cover
+
 from pydantic import Field
 from vertexai.generative_models import Tool, ToolConfig
 
@@ -38,7 +39,6 @@ class VertexAIChatPromptExecutionSettings(VertexAIPromptExecutionSettings):
     tools: Annotated[
         list[Tool] | None,
         Field(
-            max_length=64,
             description="Do not set this manually. It is set by the service based "
             "on the function choice configuration.",
         ),
diff --git a/python/semantic_kernel/connectors/ai/mistral_ai/prompt_execution_settings/mistral_ai_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/mistral_ai/prompt_execution_settings/mistral_ai_prompt_execution_settings.py
index ce61d75740f5..ea709172950f 100644
--- a/python/semantic_kernel/connectors/ai/mistral_ai/prompt_execution_settings/mistral_ai_prompt_execution_settings.py
+++ b/python/semantic_kernel/connectors/ai/mistral_ai/prompt_execution_settings/mistral_ai_prompt_execution_settings.py
@@ -45,7 +45,6 @@ class MistralAIChatPromptExecutionSettings(MistralAIPromptExecutionSettings):
     tools: Annotated[
         list[dict[str, Any]] | None,
         Field(
-            max_length=64,
             description="Do not set this manually. It is set by the service based "
             "on the function choice configuration.",
         ),
diff --git a/python/semantic_kernel/connectors/ai/ollama/ollama_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/ollama/ollama_prompt_execution_settings.py
index f315f971e91f..ebe8cc8e6cd3 100644
--- a/python/semantic_kernel/connectors/ai/ollama/ollama_prompt_execution_settings.py
+++ b/python/semantic_kernel/connectors/ai/ollama/ollama_prompt_execution_settings.py
@@ -13,9 +13,6 @@ class OllamaPromptExecutionSettings(PromptExecutionSettings):
     format: Literal["json"] | None = None
     options: dict[str, Any] | None = None
 
-    # TODO(@taochen): Add individual properties for execution settings and
-    # convert them to the appropriate types in the options dictionary.
-
 
 class OllamaTextPromptExecutionSettings(OllamaPromptExecutionSettings):
     """Settings for Ollama text prompt execution."""
@@ -32,7 +29,6 @@ class OllamaChatPromptExecutionSettings(OllamaPromptExecutionSettings):
     tools: Annotated[
         list[dict[str, Any]] | None,
         Field(
-            max_length=64,
             description="Do not set this manually. It is set by the service based "
             "on the function choice configuration.",
         ),
diff --git a/python/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/open_ai_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/open_ai_prompt_execution_settings.py
index f85f03289d92..1ff6c993ea24 100644
--- a/python/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/open_ai_prompt_execution_settings.py
+++ b/python/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/open_ai_prompt_execution_settings.py
@@ -76,7 +76,6 @@ class OpenAIChatPromptExecutionSettings(OpenAIPromptExecutionSettings):
     tools: Annotated[
         list[dict[str, Any]] | None,
         Field(
-            max_length=64,
             description="Do not set this manually. It is set by the service based "
             "on the function choice configuration.",
         ),
diff --git a/python/tests/unit/connectors/ai/azure_ai_inference/test_azure_ai_inference_request_settings.py b/python/tests/unit/connectors/ai/azure_ai_inference/test_azure_ai_inference_request_settings.py
new file mode 100644
index 000000000000..7fc8fb1e10da
--- /dev/null
+++ b/python/tests/unit/connectors/ai/azure_ai_inference/test_azure_ai_inference_request_settings.py
@@ -0,0 +1,143 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from semantic_kernel.connectors.ai.azure_ai_inference import (
+    AzureAIInferenceChatPromptExecutionSettings,
+    AzureAIInferenceEmbeddingPromptExecutionSettings,
+    AzureAIInferencePromptExecutionSettings,
+)
+from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
+
+
+def test_default_azure_ai_inference_prompt_execution_settings():
+    settings = AzureAIInferencePromptExecutionSettings()
+
+    assert settings.frequency_penalty is None
+    assert settings.max_tokens is None
+    assert settings.presence_penalty is None
+    assert settings.seed is None
+    assert settings.stop is None
+    assert settings.temperature is None
+    assert settings.top_p is None
+    assert settings.extra_parameters is None
+
+
+def test_custom_azure_ai_inference_prompt_execution_settings():
+    settings = AzureAIInferencePromptExecutionSettings(
+        frequency_penalty=0.5,
+        max_tokens=128,
+        presence_penalty=0.5,
+        seed=1,
+        stop="world",
+        temperature=0.5,
+        top_p=0.5,
+        extra_parameters={"key": "value"},
+    )
+
+    assert settings.frequency_penalty == 0.5
+    assert settings.max_tokens == 128
+    assert settings.presence_penalty == 0.5
+    assert settings.seed == 1
+    assert settings.stop == "world"
+    assert settings.temperature == 0.5
+    assert settings.top_p == 0.5
+    assert settings.extra_parameters == {"key": "value"}
+
+
+def test_azure_ai_inference_prompt_execution_settings_from_default_completion_config():
+    settings = PromptExecutionSettings(service_id="test_service")
+    chat_settings = AzureAIInferenceChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.service_id == "test_service"
+    assert chat_settings.frequency_penalty is None
+    assert chat_settings.max_tokens is None
+    assert chat_settings.presence_penalty is None
+    assert chat_settings.seed is None
+    assert chat_settings.stop is None
+    assert chat_settings.temperature is None
+    assert chat_settings.top_p is None
+    assert chat_settings.extra_parameters is None
+
+
+def test_azure_ai_inference_prompt_execution_settings_from_openai_prompt_execution_settings():
+    chat_settings = AzureAIInferenceChatPromptExecutionSettings(service_id="test_service", temperature=1.0)
+    new_settings = AzureAIInferencePromptExecutionSettings(service_id="test_2", temperature=0.0)
+    chat_settings.update_from_prompt_execution_settings(new_settings)
+
+    assert chat_settings.service_id == "test_2"
+    assert chat_settings.temperature == 0.0
+
+
+def test_azure_ai_inference_prompt_execution_settings_from_custom_completion_config():
+    settings = PromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "frequency_penalty": 0.5,
+            "max_tokens": 128,
+            "presence_penalty": 0.5,
+            "seed": 1,
+            "stop": "world",
+            "temperature": 0.5,
+            "top_p": 0.5,
+            "extra_parameters": {"key": "value"},
+        },
+    )
+    chat_settings = AzureAIInferenceChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.service_id == "test_service"
+    assert chat_settings.frequency_penalty == 0.5
+    assert chat_settings.max_tokens == 128
+    assert chat_settings.presence_penalty == 0.5
+    assert chat_settings.seed == 1
+    assert chat_settings.stop == "world"
+    assert chat_settings.temperature == 0.5
+    assert chat_settings.top_p == 0.5
+    assert chat_settings.extra_parameters == {"key": "value"}
+
+
+def test_azure_ai_inference_chat_prompt_execution_settings_from_custom_completion_config_with_functions():
+    settings = PromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "tools": [{"function": {}}],
+        },
+    )
+    chat_settings = AzureAIInferenceChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.tools == [{"function": {}}]
+
+
+def test_create_options():
+    settings = AzureAIInferenceChatPromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "frequency_penalty": 0.5,
+            "max_tokens": 128,
+            "presence_penalty": 0.5,
+            "seed": 1,
+            "stop": "world",
+            "temperature": 0.5,
+            "top_p": 0.5,
+            "extra_parameters": {"key": "value"},
+        },
+    )
+    options = settings.prepare_settings_dict()
+
+    assert options["frequency_penalty"] == 0.5
+    assert options["max_tokens"] == 128
+    assert options["presence_penalty"] == 0.5
+    assert options["seed"] == 1
+    assert options["stop"] == "world"
+    assert options["temperature"] == 0.5
+    assert options["top_p"] == 0.5
+    assert options["extra_parameters"] == {"key": "value"}
+    assert "tools" not in options
+    assert "tool_config" not in options
+
+
+def test_default_azure_ai_inference_embedding_prompt_execution_settings():
+    settings = AzureAIInferenceEmbeddingPromptExecutionSettings()
+
+    assert settings.dimensions is None
+    assert settings.encoding_format is None
+    assert settings.input_type is None
+    assert settings.extra_parameters is None
diff --git a/python/tests/unit/connectors/ai/bedrock/test_bedrock_request_settings.py b/python/tests/unit/connectors/ai/bedrock/test_bedrock_request_settings.py
new file mode 100644
index 000000000000..d963a0ef8457
--- /dev/null
+++ b/python/tests/unit/connectors/ai/bedrock/test_bedrock_request_settings.py
@@ -0,0 +1,104 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+
+from semantic_kernel.connectors.ai.bedrock import BedrockChatPromptExecutionSettings, BedrockPromptExecutionSettings
+from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
+
+
+def test_default_bedrock_prompt_execution_settings():
+    settings = BedrockPromptExecutionSettings()
+
+    assert settings.temperature is None
+    assert settings.top_p is None
+    assert settings.top_k is None
+    assert settings.max_tokens is None
+    assert settings.stop == []
+
+
+def test_custom_bedrock_prompt_execution_settings():
+    settings = BedrockPromptExecutionSettings(
+        temperature=0.5,
+        top_p=0.5,
+        top_k=10,
+        max_tokens=128,
+        stop=["world"],
+    )
+
+    assert settings.temperature == 0.5
+    assert settings.top_p == 0.5
+    assert settings.top_k == 10
+    assert settings.max_tokens == 128
+    assert settings.stop == ["world"]
+
+
+def test_bedrock_prompt_execution_settings_from_default_completion_config():
+    settings = PromptExecutionSettings(service_id="test_service")
+    chat_settings = BedrockChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.service_id == "test_service"
+    assert chat_settings.temperature is None
+    assert chat_settings.top_p is None
+    assert chat_settings.top_k is None
+    assert chat_settings.max_tokens is None
+    assert chat_settings.stop == []
+
+
+def test_bedrock_prompt_execution_settings_from_openai_prompt_execution_settings():
+    chat_settings = BedrockChatPromptExecutionSettings(service_id="test_service", temperature=1.0)
+    new_settings = BedrockPromptExecutionSettings(service_id="test_2", temperature=0.0)
+    chat_settings.update_from_prompt_execution_settings(new_settings)
+
+    assert chat_settings.service_id == "test_2"
+    assert chat_settings.temperature == 0.0
+
+
+def test_bedrock_prompt_execution_settings_from_custom_completion_config():
+    settings = PromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "temperature": 0.5,
+            "top_p": 0.5,
+            "top_k": 10,
+            "max_tokens": 128,
+            "stop": ["world"],
+        },
+    )
+    chat_settings = BedrockChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.temperature == 0.5
+    assert chat_settings.top_p == 0.5
+    assert chat_settings.top_k == 10
+    assert chat_settings.max_tokens == 128
+    assert chat_settings.stop == ["world"]
+
+
+def test_bedrock_chat_prompt_execution_settings_from_custom_completion_config_with_functions():
+    settings = PromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "tools": [{"function": {}}],
+        },
+    )
+    chat_settings = BedrockChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.tools == [{"function": {}}]
+
+
+def test_create_options():
+    settings = BedrockPromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "temperature": 0.5,
+            "top_p": 0.5,
+            "top_k": 10,
+            "max_tokens": 128,
+            "stop": ["world"],
+        },
+    )
+    options = settings.prepare_settings_dict()
+
+    assert options["temperature"] == 0.5
+    assert options["top_p"] == 0.5
+    assert options["top_k"] == 10
+    assert options["max_tokens"] == 128
+    assert options["stop"] == ["world"]
diff --git a/python/tests/unit/connectors/ai/google/google_ai/test_google_ai_request_settings.py b/python/tests/unit/connectors/ai/google/google_ai/test_google_ai_request_settings.py
new file mode 100644
index 000000000000..07b2f0267b5b
--- /dev/null
+++ b/python/tests/unit/connectors/ai/google/google_ai/test_google_ai_request_settings.py
@@ -0,0 +1,133 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from semantic_kernel.connectors.ai.google.google_ai import (
+    GoogleAIChatPromptExecutionSettings,
+    GoogleAIEmbeddingPromptExecutionSettings,
+    GoogleAIPromptExecutionSettings,
+)
+from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
+
+
+def test_default_google_ai_prompt_execution_settings():
+    settings = GoogleAIPromptExecutionSettings()
+
+    assert settings.stop_sequences is None
+    assert settings.response_mime_type is None
+    assert settings.response_schema is None
+    assert settings.candidate_count is None
+    assert settings.max_output_tokens is None
+    assert settings.temperature is None
+    assert settings.top_p is None
+    assert settings.top_k is None
+
+
+def test_custom_google_ai_prompt_execution_settings():
+    settings = GoogleAIPromptExecutionSettings(
+        stop_sequences=["world"],
+        response_mime_type="text/plain",
+        candidate_count=1,
+        max_output_tokens=128,
+        temperature=0.5,
+        top_p=0.5,
+        top_k=10,
+    )
+
+    assert settings.stop_sequences == ["world"]
+    assert settings.response_mime_type == "text/plain"
+    assert settings.candidate_count == 1
+    assert settings.max_output_tokens == 128
+    assert settings.temperature == 0.5
+    assert settings.top_p == 0.5
+    assert settings.top_k == 10
+
+
+def test_google_ai_prompt_execution_settings_from_default_completion_config():
+    settings = PromptExecutionSettings(service_id="test_service")
+    chat_settings = GoogleAIChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.service_id == "test_service"
+    assert chat_settings.stop_sequences is None
+    assert chat_settings.response_mime_type is None
+    assert chat_settings.response_schema is None
+    assert chat_settings.candidate_count is None
+    assert chat_settings.max_output_tokens is None
+    assert chat_settings.temperature is None
+    assert chat_settings.top_p is None
+    assert chat_settings.top_k is None
+
+
+def test_google_ai_prompt_execution_settings_from_openai_prompt_execution_settings():
+    chat_settings = GoogleAIChatPromptExecutionSettings(service_id="test_service", temperature=1.0)
+    new_settings = GoogleAIPromptExecutionSettings(service_id="test_2", temperature=0.0)
+    chat_settings.update_from_prompt_execution_settings(new_settings)
+
+    assert chat_settings.service_id == "test_2"
+    assert chat_settings.temperature == 0.0
+
+
+def test_google_ai_prompt_execution_settings_from_custom_completion_config():
+    settings = PromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "stop_sequences": ["world"],
+            "response_mime_type": "text/plain",
+            "candidate_count": 1,
+            "max_output_tokens": 128,
+            "temperature": 0.5,
+            "top_p": 0.5,
+            "top_k": 10,
+        },
+    )
+    chat_settings = GoogleAIChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.stop_sequences == ["world"]
+    assert chat_settings.response_mime_type == "text/plain"
+    assert chat_settings.candidate_count == 1
+    assert chat_settings.max_output_tokens == 128
+    assert chat_settings.temperature == 0.5
+    assert chat_settings.top_p == 0.5
+    assert chat_settings.top_k == 10
+
+
+def test_google_ai_chat_prompt_execution_settings_from_custom_completion_config_with_functions():
+    settings = PromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "tools": [{"function": {}}],
+        },
+    )
+    chat_settings = GoogleAIChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.tools == [{"function": {}}]
+
+
+def test_create_options():
+    settings = GoogleAIChatPromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "stop_sequences": ["world"],
+            "response_mime_type": "text/plain",
+            "candidate_count": 1,
+            "max_output_tokens": 128,
+            "temperature": 0.5,
+            "top_p": 0.5,
+            "top_k": 10,
+        },
+    )
+    options = settings.prepare_settings_dict()
+
+    assert options["stop_sequences"] == ["world"]
+    assert options["response_mime_type"] == "text/plain"
+    assert options["candidate_count"] == 1
+    assert options["max_output_tokens"] == 128
+    assert options["temperature"] == 0.5
+    assert options["top_p"] == 0.5
+    assert options["top_k"] == 10
+    assert "tools" not in options
+    assert "tool_config" not in options
+
+
+def test_default_google_ai_embedding_prompt_execution_settings():
+    settings = GoogleAIEmbeddingPromptExecutionSettings()
+
+    assert settings.output_dimensionality is None
diff --git a/python/tests/unit/connectors/ai/google/vertex_ai/test_vertex_ai_request_settings.py b/python/tests/unit/connectors/ai/google/vertex_ai/test_vertex_ai_request_settings.py
new file mode 100644
index 000000000000..2abff104a8b3
--- /dev/null
+++ b/python/tests/unit/connectors/ai/google/vertex_ai/test_vertex_ai_request_settings.py
@@ -0,0 +1,136 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from semantic_kernel.connectors.ai.google.vertex_ai import (
+    VertexAIChatPromptExecutionSettings,
+    VertexAIPromptExecutionSettings,
+)
+from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import (
+    VertexAIEmbeddingPromptExecutionSettings,
+)
+from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
+
+
+def test_default_vertex_ai_prompt_execution_settings():
+    settings = VertexAIPromptExecutionSettings()
+
+    assert settings.stop_sequences is None
+    assert settings.response_mime_type is None
+    assert settings.response_schema is None
+    assert settings.candidate_count is None
+    assert settings.max_output_tokens is None
+    assert settings.temperature is None
+    assert settings.top_p is None
+    assert settings.top_k is None
+
+
+def test_custom_vertex_ai_prompt_execution_settings():
+    settings = VertexAIPromptExecutionSettings(
+        stop_sequences=["world"],
+        response_mime_type="text/plain",
+        candidate_count=1,
+        max_output_tokens=128,
+        temperature=0.5,
+        top_p=0.5,
+        top_k=10,
+    )
+
+    assert settings.stop_sequences == ["world"]
+    assert settings.response_mime_type == "text/plain"
+    assert settings.candidate_count == 1
+    assert settings.max_output_tokens == 128
+    assert settings.temperature == 0.5
+    assert settings.top_p == 0.5
+    assert settings.top_k == 10
+
+
+def test_vertex_ai_prompt_execution_settings_from_default_completion_config():
+    settings = PromptExecutionSettings(service_id="test_service")
+    chat_settings = VertexAIChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.service_id == "test_service"
+    assert chat_settings.stop_sequences is None
+    assert chat_settings.response_mime_type is None
+    assert chat_settings.response_schema is None
+    assert chat_settings.candidate_count is None
+    assert chat_settings.max_output_tokens is None
+    assert chat_settings.temperature is None
+    assert chat_settings.top_p is None
+    assert chat_settings.top_k is None
+
+
+def test_vertex_ai_prompt_execution_settings_from_openai_prompt_execution_settings():
+    chat_settings = VertexAIChatPromptExecutionSettings(service_id="test_service", temperature=1.0)
+    new_settings = VertexAIPromptExecutionSettings(service_id="test_2", temperature=0.0)
+    chat_settings.update_from_prompt_execution_settings(new_settings)
+
+    assert chat_settings.service_id == "test_2"
+    assert chat_settings.temperature == 0.0
+
+
+def test_vertex_ai_prompt_execution_settings_from_custom_completion_config():
+    settings = PromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "stop_sequences": ["world"],
+            "response_mime_type": "text/plain",
+            "candidate_count": 1,
+            "max_output_tokens": 128,
+            "temperature": 0.5,
+            "top_p": 0.5,
+            "top_k": 10,
+        },
+    )
+    chat_settings = VertexAIChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.stop_sequences == ["world"]
+    assert chat_settings.response_mime_type == "text/plain"
+    assert chat_settings.candidate_count == 1
+    assert chat_settings.max_output_tokens == 128
+    assert chat_settings.temperature == 0.5
+    assert chat_settings.top_p == 0.5
+    assert chat_settings.top_k == 10
+
+
+def test_vertex_ai_chat_prompt_execution_settings_from_custom_completion_config_with_functions():
+    settings = PromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "tools": [],
+        },
+    )
+    chat_settings = VertexAIChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.tools == []
+
+
+def test_create_options():
+    settings = VertexAIChatPromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "stop_sequences": ["world"],
+            "response_mime_type": "text/plain",
+            "candidate_count": 1,
+            "max_output_tokens": 128,
+            "temperature": 0.5,
+            "top_p": 0.5,
+            "top_k": 10,
+        },
+    )
+    options = settings.prepare_settings_dict()
+
+    assert options["stop_sequences"] == ["world"]
+    assert options["response_mime_type"] == "text/plain"
+    assert options["candidate_count"] == 1
+    assert options["max_output_tokens"] == 128
+    assert options["temperature"] == 0.5
+    assert options["top_p"] == 0.5
+    assert options["top_k"] == 10
+    assert "tools" not in options
+    assert "tool_config" not in options
+
+
+def test_default_vertex_ai_embedding_prompt_execution_settings():
+    settings = VertexAIEmbeddingPromptExecutionSettings()
+
+    assert settings.output_dimensionality is None
+    assert settings.auto_truncate is None
diff --git a/python/tests/unit/connectors/ai/ollama/test_ollama_request_settings.py b/python/tests/unit/connectors/ai/ollama/test_ollama_request_settings.py
new file mode 100644
index 000000000000..a446f4f43438
--- /dev/null
+++ b/python/tests/unit/connectors/ai/ollama/test_ollama_request_settings.py
@@ -0,0 +1,95 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from semantic_kernel.connectors.ai.ollama import OllamaPromptExecutionSettings
+from semantic_kernel.connectors.ai.ollama.ollama_prompt_execution_settings import (
+    OllamaChatPromptExecutionSettings,
+    OllamaTextPromptExecutionSettings,
+)
+from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
+
+
+def test_default_ollama_prompt_execution_settings():
+    settings = OllamaPromptExecutionSettings()
+
+    assert settings.format is None
+    assert settings.options is None
+
+
+def test_custom_ollama_prompt_execution_settings():
+    settings = OllamaPromptExecutionSettings(
+        format="json",
+        options={
+            "key": "value",
+        },
+    )
+
+    assert settings.format == "json"
+    assert settings.options == {"key": "value"}
+
+
+def test_ollama_prompt_execution_settings_from_default_completion_config():
+    settings = PromptExecutionSettings(service_id="test_service")
+    chat_settings = OllamaChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.service_id == "test_service"
+    assert chat_settings.format is None
+    assert chat_settings.options is None
+
+
+def test_ollama_prompt_execution_settings_from_openai_prompt_execution_settings():
+    chat_settings = OllamaChatPromptExecutionSettings(service_id="test_service", options={"temperature": 0.5})
+    new_settings = OllamaPromptExecutionSettings(service_id="test_2", options={"temperature": 0.0})
+    chat_settings.update_from_prompt_execution_settings(new_settings)
+
+    assert chat_settings.service_id == "test_2"
+    assert chat_settings.options["temperature"] == 0.0
+
+
+def test_ollama_prompt_execution_settings_from_custom_completion_config():
+    settings = PromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "format": "json",
+            "options": {
+                "key": "value",
+            },
+        },
+    )
+    chat_settings = OllamaChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.service_id == "test_service"
+    assert chat_settings.format == "json"
+    assert chat_settings.options == {"key": "value"}
+
+
+def test_ollama_chat_prompt_execution_settings_from_custom_completion_config_with_functions():
+    settings = PromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "tools": [{"function": {}}],
+        },
+    )
+    chat_settings = OllamaChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+
+    assert chat_settings.tools == [{"function": {}}]
+
+
+def test_create_options():
+    settings = OllamaChatPromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "format": "json",
+        },
+    )
+    options = settings.prepare_settings_dict()
+
+    assert options["format"] == "json"
+
+
+def test_default_ollama_text_prompt_execution_settings():
+    settings = OllamaTextPromptExecutionSettings()
+
+    assert settings.system is None
+    assert settings.template is None
+    assert settings.context is None
+    assert settings.raw is None