llm-chat/src/llm_chat/models.py


from enum import StrEnum, auto

from pydantic import BaseModel, ConfigDict

from llm_chat.settings import DEFAULT_TEMPERATURE, Model


class Role(StrEnum):
    """Role of a message author in the chat."""

    # auto() with StrEnum yields the lowercase member name,
    # e.g. Role.USER == "user".
    ASSISTANT = auto()
    SYSTEM = auto()
    USER = auto()


class Message(BaseModel):
    """Message in the conversation."""

    role: Role
    content: str

    # Frozen: messages are immutable once created. use_enum_values stores the
    # role as its plain string value (e.g. "user") rather than the Role member.
    model_config: ConfigDict = ConfigDict(  # type: ignore[misc]
        frozen=True,
        use_enum_values=True,
    )


class Conversation(BaseModel):
    """Conversation in the chat."""

    messages: list[Message]
    model: Model
    temperature: float = DEFAULT_TEMPERATURE
    completion_tokens: int = 0
    prompt_tokens: int = 0
    cost: float = 0.0

    # Unlike Message, a Conversation stays mutable (frozen=False) so token
    # counts and cost can be updated in place as the chat progresses.
    model_config: ConfigDict = ConfigDict(  # type: ignore[misc]
        frozen=False,
        use_enum_values=True,
    )
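

# --- Usage sketch (illustrative; not part of the original module) ------------
# A minimal example of how the models fit together. The Model member used
# below (Model.GPT_4O) is hypothetical; the real member names are defined in
# llm_chat.settings.
if __name__ == "__main__":
    msg = Message(role=Role.USER, content="Hello!")
    print(msg.role)  # -> "user" (use_enum_values stored the plain string)

    conv = Conversation(
        messages=[msg],
        model=Model.GPT_4O,  # hypothetical member; see llm_chat.settings
    )
    conv.completion_tokens += 12  # fine: Conversation is not frozen
    # msg.content = "edited"  # would raise ValidationError: Message is frozen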