# llm-chat/tests/test_chat.py
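"""Tests for llm_chat.chat: saving, loading, sending messages, and cost calculation."""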

from datetime import datetime
from pathlib import Path
from unittest.mock import patch
from zoneinfo import ZoneInfo

import pytest

from llm_chat.chat import Chat, save_conversation
from llm_chat.models import Conversation, Message, Role
from llm_chat.settings import Model, OpenAISettings


@pytest.mark.parametrize(
    "name,expected_filename",
    [("", "20210101120000.json"), ("foo", "20210101120000-foo.json")],
)
def test_save_conversation(name: str, expected_filename: str, tmp_path: Path) -> None:
    conversation = Conversation(
        messages=[
            Message(role=Role.SYSTEM, content="Hello!"),
            Message(role=Role.USER, content="Hi!"),
            Message(role=Role.ASSISTANT, content="How are you?"),
        ],
        model=Model.GPT3,
        temperature=0.5,
        completion_tokens=10,
        prompt_tokens=15,
        cost=0.000043,
        name=name,
    )
    path = tmp_path / ".history"
    expected_file_path = path / expected_filename
    dt = datetime(2021, 1, 1, 12, 0, 0, tzinfo=ZoneInfo("UTC"))

    # The history directory should be created on demand, and the file should be
    # named <timestamp>[-<name>].json based on the supplied datetime.
    assert not path.exists()
    save_conversation(
        conversation=conversation,
        history_dir=path,
        dt=dt,
    )
    assert path.exists()
    assert path.is_dir()
    assert expected_file_path in path.iterdir()

    # The saved file should round-trip back to an identical Conversation.
    with expected_file_path.open() as f:
        conversation_from_file = Conversation.model_validate_json(f.read())
    assert conversation == conversation_from_file


def test_load(tmp_path: Path) -> None:
    # Create a conversation object to save
    conversation = Conversation(
        messages=[
            Message(role=Role.SYSTEM, content="Hello!"),
            Message(role=Role.USER, content="Hi!"),
            Message(role=Role.ASSISTANT, content="How are you?"),
        ],
        model=Model.GPT3,
        temperature=0.5,
        completion_tokens=10,
        prompt_tokens=15,
        cost=0.000043,
    )

    # Save the conversation to a file
    file_path = tmp_path / "conversation.json"
    with file_path.open("w") as f:
        f.write(conversation.model_dump_json())

    # Load the conversation from the file
    loaded_chat = Chat.load(file_path, api_key="foo", history_dir=tmp_path)

    # Check that the loaded conversation matches the original conversation
    assert loaded_chat.settings.model == conversation.model
    assert loaded_chat.settings.temperature == conversation.temperature
    assert loaded_chat.conversation.messages == conversation.messages
    assert loaded_chat.settings.api_key == "foo"
    assert loaded_chat.settings.history_dir == tmp_path

    # We don't want to load the tokens or cost from the previous session
    assert loaded_chat.conversation.completion_tokens == 0
    assert loaded_chat.conversation.prompt_tokens == 0
    assert loaded_chat.cost == 0


def test_send_message() -> None:
    # Stub out the request to the OpenAI API so the test runs offline.
    with patch("llm_chat.chat.Chat._make_request") as mock_make_request:
        mock_make_request.return_value = {
            "choices": [{"message": {"content": "Hello!"}}],
            "usage": {"completion_tokens": 1, "prompt_tokens": 1},
        }
        conversation = Chat()
        response = conversation.send_message("Hello")
        assert isinstance(response, str)
        assert response == "Hello!"
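

# A note on the expected costs below (an inference, not stated in this file):
# the parametrized values are consistent with OpenAI's mid-2023 pricing of
# roughly $0.0015/1K prompt and $0.002/1K completion tokens for GPT-3.5, and
# $0.03/1K prompt and $0.06/1K completion tokens for GPT-4. With 15 prompt and
# 10 completion tokens that works out to ~$0.0000425 (presumably rounded to
# 0.000043 by the cost calculation) and exactly $0.00105.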
@pytest.mark.parametrize(
    "model,cost",
    [(Model.GPT3, 0.000043), (Model.GPT4, 0.00105)],
)
def test_calculate_cost(model: Model, cost: float) -> None:
    with patch("llm_chat.chat.Chat._make_request") as mock_make_request:
        mock_make_request.return_value = {
            "choices": [{"message": {"content": "Hello!"}}],
            "usage": {"completion_tokens": 10, "prompt_tokens": 15},
        }
        settings = OpenAISettings(model=model)
        conversation = Chat(settings=settings)
        _ = conversation.send_message("Hello")
        assert conversation.cost == cost
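

# To run just this module from the project root (standard pytest usage):
#
#     pytest tests/test_chat.py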