Compare commits
2 Commits
5bbd7ee612
...
0015ae4bff
Author | SHA1 | Date |
---|---|---|
Paul Harrison | 0015ae4bff | |
Paul Harrison | c9f01c7dc2 |
|
@ -0,0 +1,44 @@
|
|||
# Makefile for the llm-chat project: formatting, linting, and test targets.
# Every tool is invoked through `poetry run` so the project virtualenv is used.

# Running bare `make` prints the self-documenting help screen below.
.DEFAULT_GOAL := help

# Use bash instead of the default /bin/sh for recipe lines.
SHELL := /bin/bash

.PHONY: black
black: ## Run black formatter
	@poetry run black src tests;

.PHONY: black-check
# Check-only variant: exits non-zero if black would reformat anything.
black-check: ## Run black formatter
	@poetry run black src tests --check;

.PHONY: format
# Aggregate target: import-sorts first, then formats.
format: isort black ## Format to match linting requirements

.PHONY: help
# Parses this Makefile: every `target: ## description` line becomes a help row.
help: ## Show all available commands
	@awk 'BEGIN {FS = ":.*##"; printf "Usage: make \033[36m<target>\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-13s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST);

.PHONY: isort
isort: ## Run isort formatter
	@poetry run isort src tests;

.PHONY: isort-check
# Check-only variant: exits non-zero if import order is wrong.
isort-check: ## Run isort formatter
	@poetry run isort src tests --check-only;

.PHONY: mypy
mypy: ## Run mypy type checking
	@poetry run mypy src tests;

.PHONY: pydocstyle
pydocstyle: ## Run docstring linting
	@poetry run pydocstyle src tests;

.PHONY: quality
# Aggregate target: all non-mutating checks, suitable for CI.
quality: ruff mypy isort-check black-check pydocstyle ## Run linting checks

.PHONY: ruff
ruff: ## Run ruff linter
	@poetry run ruff check src tests;

.PHONY: test
# -x stops pytest at the first failing test.
test: ## Run test pipeline
	@poetry run pytest -x
File diff suppressed because it is too large
Load Diff
|
@ -8,7 +8,58 @@ packages = [{include = "llm_chat", from = "src"}]
|
|||
|
||||
[tool.poetry.dependencies]
# Pinned to 3.11.x — the code uses enum.StrEnum, introduced in 3.11.
python = ">=3.11,<3.12"
openai = "^0.27.8"
pydantic = "^2.1.1"
pydantic-settings = "^2.0.2"

# Development / test-only dependencies (installed with the "test" group).
[tool.poetry.group.test.dependencies]
pytest = "^7.4.0"
black = "^23.7.0"
isort = "^5.12.0"
ruff = "^0.0.284"
mypy = "^1.5.0"
pydocstyle = "^6.3.0"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

# Formatter configuration (line length kept in sync with [tool.ruff] below).
[tool.black]
line-length = 88
target-version = ['py311']
include = '\.pyi?$'

[tool.isort]
multi_line_output = 3
# The "black" profile keeps isort and black from fighting over formatting.
profile = "black"
src_paths = ["src", "tests"]

[tool.mypy]
python_version = "3.11"
# Relaxation of strict mode: calls into untyped third-party code are allowed.
disallow_untyped_calls = false
pretty = true
show_error_codes = true
strict = true
warn_unreachable = true

[tool.pydocstyle]
# Ignores missing module/package/__init__ docstrings and the halves of
# mutually-exclusive formatting-convention rule pairs.
ignore = "D100,D104,D107,D203,D213,D406,D407,D413"

[tool.pytest.ini_options]
addopts = ["--verbose"]
# Resolve imports against src/ (matches the poetry "from = src" layout).
pythonpath = ["src"]
testpaths = ["tests"]

[tool.ruff]
exclude = [
    ".git",
    ".ruff_cache",
    ".vscode",
]
# Rule families ruff is allowed to auto-fix with `--fix`.
fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
line-length = 88
# Only pycodestyle errors (E) and pyflakes (F) are enforced.
select = ["E", "F"]
target-version = "py311"

[tool.ruff.mccabe]
max-complexity = 10
|
|
@ -0,0 +1,70 @@
|
|||
from typing import Any
|
||||
|
||||
from openai import ChatCompletion
|
||||
from openai.openai_object import OpenAIObject
|
||||
|
||||
from llm_chat.models import Conversation, Message, Role
|
||||
from llm_chat.settings import OpenAISettings
|
||||
|
||||
# System prompt(s) prepended to every new conversation.
# NOTE: the two string fragments below are joined by implicit concatenation;
# the leading space on the second fragment is required — without it the prompt
# read "...with theappropriate language tags." (original bug).
INITIAL_SYSTEM_MESSAGES = [
    Message(
        role=Role.SYSTEM,
        content=(
            "Always respond in Markdown formatting, using code blocks with the"
            " appropriate language tags."
        ),
    ),
]
|
||||
|
||||
|
||||
class Chat:
    """Interface class for OpenAI's ChatGPT chat API.

    Arguments:
        settings (optional): Settings for the chat. Defaults to reading from
            environment variables.
        context (optional): Extra messages appended after the initial system
            messages. Defaults to no extra context.
    """

    def __init__(
        self,
        settings: OpenAISettings | None = None,
        context: list[Message] | None = None,
    ) -> None:
        # NOTE: `context` previously defaulted to a mutable `[]`, which Python
        # evaluates once and shares across all calls — any mutation of the
        # default would leak between Chat instances. A `None` sentinel with
        # the fallback below is the safe, equivalent default.
        self._settings = settings
        self.conversation = Conversation(
            messages=INITIAL_SYSTEM_MESSAGES + (context if context is not None else []),
        )

    @property
    def settings(self) -> OpenAISettings:
        """Get OpenAI chat settings, constructed lazily on first access."""
        if self._settings is None:
            self._settings = OpenAISettings()
        return self._settings

    def _make_request(self, message: str) -> dict[str, Any]:
        """Send a request to the OpenAI API.

        The user message is appended to the conversation history *before* the
        request is issued.

        TODO: Add error handling.
        TODO: Pop message from messages if error occurs.
        """
        self.conversation.messages.append(Message(role=Role.USER, content=message))

        response: OpenAIObject = ChatCompletion.create(
            model=self.settings.model,
            temperature=self.settings.temperature,
            api_key=self.settings.api_key,
            messages=self.conversation.model_dump()["messages"],
        )

        out: dict[str, Any] = response.to_dict()
        return out

    def send_message(self, message: str) -> str:
        """Send a message to the assistant and return its reply text.

        Both the user message and the assistant's reply are recorded in the
        conversation history.

        TODO: Add error handling.
        """
        response = self._make_request(message)
        reply: str = response["choices"][0]["message"]["content"]
        self.conversation.messages.append(Message(role=Role.ASSISTANT, content=reply))
        return reply
|
|
@ -0,0 +1,34 @@
|
|||
from enum import StrEnum, auto
|
||||
|
||||
from pydantic import BaseModel, ConfigDict
|
||||
|
||||
|
||||
class Role(StrEnum):
|
||||
"""Role of a user in the chat."""
|
||||
|
||||
ASSISTANT = auto()
|
||||
SYSTEM = auto()
|
||||
USER = auto()
|
||||
|
||||
|
||||
class Message(BaseModel):
    """A single utterance in the conversation, tagged with its author role."""

    role: Role
    content: str

    # A message is immutable once created; enum members serialise as their
    # plain string values so dumps match the OpenAI wire format.
    model_config: ConfigDict = ConfigDict(  # type: ignore[misc]
        use_enum_values=True,
        frozen=True,
    )
|
||||
|
||||
|
||||
class Conversation(BaseModel):
    """Ordered history of messages exchanged in the chat."""

    messages: list[Message]

    # Unlike Message, a conversation grows over time — hence frozen=False.
    model_config: ConfigDict = ConfigDict(  # type: ignore[misc]
        use_enum_values=True,
        frozen=False,
    )
|
|
@ -0,0 +1,26 @@
|
|||
from enum import StrEnum
|
||||
|
||||
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||
|
||||
|
||||
class Model(StrEnum):
|
||||
"""Model to use for the LLM Chat application."""
|
||||
|
||||
GPT3 = "gpt-3.5-turbo"
|
||||
GPT4 = "gpt-4"
|
||||
|
||||
|
||||
class OpenAISettings(BaseSettings):
    """Configuration for the OpenAI client.

    Values are read from ``OPENAI_``-prefixed environment variables, or from a
    local ``.env`` file (e.g. ``OPENAI_API_KEY`` populates ``api_key``).
    """

    api_key: str = ""
    model: Model = Model.GPT3
    temperature: float = 0.7

    # Settings are immutable after load; enum fields serialise as their
    # string values.
    model_config: SettingsConfigDict = SettingsConfigDict(  # type: ignore[misc]
        frozen=True,
        use_enum_values=True,
        env_prefix="OPENAI_",
        env_file=".env",
        env_file_encoding="utf-8",
    )
|
|
@ -0,0 +1,9 @@
|
|||
import os
from collections.abc import Iterator

import pytest
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
def mock_openai_api_key() -> Iterator[None]:
    """Set a fake OpenAI API key for the duration of each test.

    The original fixture mutated ``os.environ`` permanently, leaking the dummy
    key into every subsequent test and the surrounding process. This version
    restores the pre-test value (or removes the key) on teardown.
    """
    original = os.environ.get("OPENAI_API_KEY")
    os.environ["OPENAI_API_KEY"] = "dummy_key"
    yield
    # Teardown: restore rather than unconditionally delete, in case the
    # environment genuinely had a key set before the test ran.
    if original is None:
        os.environ.pop("OPENAI_API_KEY", None)
    else:
        os.environ["OPENAI_API_KEY"] = original
|
|
@ -0,0 +1,14 @@
|
|||
from unittest.mock import patch
|
||||
|
||||
from llm_chat.chat import Chat
|
||||
|
||||
|
||||
def test_send_message() -> None:
    """send_message should return the assistant's reply as plain text."""
    canned_response = {"choices": [{"message": {"content": "Hello!"}}]}
    with patch("llm_chat.chat.Chat._make_request") as make_request:
        make_request.return_value = canned_response
        chat = Chat()
        reply = chat.send_message("Hello")

    assert isinstance(reply, str)
    assert reply == "Hello!"
|
Loading…
Reference in New Issue