2023-08-24 17:01:34 +00:00
|
|
|
from datetime import datetime
|
|
|
|
from pathlib import Path
|
2023-08-14 11:37:20 +00:00
|
|
|
from unittest.mock import patch
|
2023-08-24 17:01:34 +00:00
|
|
|
from zoneinfo import ZoneInfo
|
2023-08-14 11:37:20 +00:00
|
|
|
|
2023-08-24 14:23:32 +00:00
|
|
|
import pytest
|
2024-01-23 22:36:41 +00:00
|
|
|
from openai.types.chat import ChatCompletion, ChatCompletionMessage
|
|
|
|
from openai.types.chat.chat_completion import Choice
|
|
|
|
from openai.types.completion_usage import CompletionUsage
|
2023-08-24 14:23:32 +00:00
|
|
|
|
2023-08-24 17:01:34 +00:00
|
|
|
from llm_chat.chat import Chat, save_conversation
|
|
|
|
from llm_chat.models import Conversation, Message, Role
|
2023-08-24 14:23:32 +00:00
|
|
|
from llm_chat.settings import Model, OpenAISettings
|
2023-08-14 11:37:20 +00:00
|
|
|
|
|
|
|
|
2023-09-14 20:28:29 +00:00
|
|
|
@pytest.mark.parametrize(
    "name,expected_filename",
    [("", "20210101120000.json"), ("foo", "20210101120000-foo.json")],
)
def test_save_conversation(name: str, expected_filename: str, tmp_path: Path) -> None:
    """Saving creates the history dir and writes a timestamped JSON round-trip file."""
    history_dir = tmp_path / ".history"
    target_file = history_dir / expected_filename
    timestamp = datetime(2021, 1, 1, 12, 0, 0, tzinfo=ZoneInfo("UTC"))

    original = Conversation(
        messages=[
            Message(role=Role.SYSTEM, content="Hello!"),
            Message(role=Role.USER, content="Hi!"),
            Message(role=Role.ASSISTANT, content="How are you?"),
        ],
        model=Model.GPT3,
        temperature=0.5,
        completion_tokens=10,
        prompt_tokens=15,
        cost=0.000043,
        name=name,
    )

    # The history directory must not pre-exist; save_conversation creates it.
    assert not history_dir.exists()

    save_conversation(conversation=original, history_dir=history_dir, dt=timestamp)

    assert history_dir.exists()
    assert history_dir.is_dir()
    assert target_file in history_dir.iterdir()

    # Round-trip: the JSON on disk parses back into an equal Conversation.
    restored = Conversation.model_validate_json(target_file.read_text())
    assert restored == original
|
|
|
|
|
|
|
|
|
2023-08-24 17:35:27 +00:00
|
|
|
def test_load(tmp_path: Path) -> None:
    """Chat.load restores settings/messages but resets token counts and cost."""
    source = Conversation(
        messages=[
            Message(role=Role.SYSTEM, content="Hello!"),
            Message(role=Role.USER, content="Hi!"),
            Message(role=Role.ASSISTANT, content="How are you?"),
        ],
        model=Model.GPT3,
        temperature=0.5,
        completion_tokens=10,
        prompt_tokens=15,
        cost=0.000043,
    )

    # Persist the conversation as JSON so Chat.load can read it back.
    conversation_file = tmp_path / "conversation.json"
    conversation_file.write_text(source.model_dump_json())

    chat = Chat.load(conversation_file, api_key="foo", history_dir=tmp_path)

    # Model, temperature, and history come from the file; overrides via kwargs.
    assert chat.settings.model == source.model
    assert chat.settings.temperature == source.temperature
    assert chat.conversation.messages == source.messages
    assert chat.settings.api_key == "foo"
    assert chat.settings.history_dir == tmp_path

    # Token usage and cost from the previous session are deliberately dropped.
    assert chat.conversation.completion_tokens == 0
    assert chat.conversation.prompt_tokens == 0
    assert chat.cost == 0
|
|
|
|
|
|
|
|
|
2023-08-16 09:10:06 +00:00
|
|
|
def test_send_message() -> None:
    """send_message returns the assistant's reply text from the API response."""
    fake_response = ChatCompletion(
        choices=[
            Choice(
                message=ChatCompletionMessage(content="Hello!", role="assistant"),
                finish_reason="stop",
                index=0,
            ),
        ],
        id="foo",
        created=0,
        model="gpt-3.5-turbo-0613",
        object="chat.completion",
        usage=CompletionUsage(completion_tokens=1, prompt_tokens=1, total_tokens=2),
    )

    # Patch the low-level request so no real API call is made.
    with patch("llm_chat.chat.Chat._make_request") as mock_make_request:
        mock_make_request.return_value = fake_response
        chat = Chat()
        reply = chat.send_message("Hello")

    assert isinstance(reply, str)
    assert reply == "Hello!"
|
2023-08-24 14:23:32 +00:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize("model,cost", [(Model.GPT3, 0.000043), (Model.GPT4, 0.00105)])
|
|
|
|
def test_calculate_cost(model: Model, cost: float) -> None:
|
|
|
|
with patch("llm_chat.chat.Chat._make_request") as mock_make_request:
|
2024-01-23 22:36:41 +00:00
|
|
|
mock_make_request.return_value = ChatCompletion(
|
|
|
|
choices=[
|
|
|
|
Choice(
|
|
|
|
message=ChatCompletionMessage(content="Hello!", role="assistant"),
|
|
|
|
finish_reason="stop",
|
|
|
|
index=0,
|
|
|
|
),
|
|
|
|
],
|
|
|
|
id="foo",
|
|
|
|
created=0,
|
|
|
|
model="gpt-3.5-turbo-0613",
|
|
|
|
object="chat.completion",
|
|
|
|
usage=CompletionUsage(
|
|
|
|
completion_tokens=10,
|
|
|
|
prompt_tokens=15,
|
|
|
|
total_tokens=25,
|
|
|
|
),
|
|
|
|
)
|
2023-08-24 14:23:32 +00:00
|
|
|
settings = OpenAISettings(model=model)
|
|
|
|
conversation = Chat(settings=settings)
|
|
|
|
_ = conversation.send_message("Hello")
|
|
|
|
assert conversation.cost == cost
|