2023-08-14 11:37:20 +00:00
|
|
|
from unittest.mock import patch
|
|
|
|
|
2023-08-24 14:23:32 +00:00
|
|
|
import pytest
|
|
|
|
|
2023-08-14 11:37:20 +00:00
|
|
|
from llm_chat.chat import Chat
|
2023-08-24 14:23:32 +00:00
|
|
|
from llm_chat.settings import Model, OpenAISettings
|
2023-08-14 11:37:20 +00:00
|
|
|
|
|
|
|
|
2023-08-16 09:10:06 +00:00
|
|
|
def test_send_message() -> None:
    """Chat.send_message returns the assistant's reply text as a plain string."""
    # Canned OpenAI-style payload the patched request will hand back.
    fake_payload = {
        "choices": [{"message": {"content": "Hello!"}}],
        "usage": {"completion_tokens": 1, "prompt_tokens": 1},
    }
    with patch("llm_chat.chat.Chat._make_request") as fake_request:
        fake_request.return_value = fake_payload
        chat = Chat()
        reply = chat.send_message("Hello")
    assert isinstance(reply, str)
    assert reply == "Hello!"
|
2023-08-24 14:23:32 +00:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize("model,cost", [(Model.GPT3, 0.000043), (Model.GPT4, 0.00105)])
def test_calculate_cost(model: Model, cost: float) -> None:
    """Chat.cost reflects the per-model token pricing after one exchange."""
    # Fixed token usage so the expected cost is deterministic per model.
    fake_payload = {
        "choices": [{"message": {"content": "Hello!"}}],
        "usage": {"completion_tokens": 10, "prompt_tokens": 15},
    }
    with patch("llm_chat.chat.Chat._make_request") as fake_request:
        fake_request.return_value = fake_payload
        chat = Chat(settings=OpenAISettings(model=model))
        _ = chat.send_message("Hello")
        assert chat.cost == cost
|