Litellm bump (#2195)

* ran bump-pydantic

* replace root_validator with model_validator

* mostly working. some alternate assistant error. changed root_validator and typing_extensions

* working generation chat. changed type

* replacing .dict with .model_dump

* argument needed to bring model_dump up to parity with dict()

* fix a few remaining issues -- working with llama and gpt

* updating requirements file

* more requirement updates

* more requirement updates

* fix to make search work

* return type fix:

* halfway types change

* fixes for mypy and pydantic:

* endpoint fix

* fix pydantic protected namespaces

* it works!

* removed unnecessary None initializations

* better logging

* changed default values to empty lists

* mypy fixes

* fixed array defaulting

---------

Co-authored-by: hagen-danswer <hagen@danswer.ai>
This commit is contained in:
josvdw
2024-08-27 17:00:27 -07:00
committed by GitHub
parent 657d2050a5
commit 50c17438d5
52 changed files with 230 additions and 223 deletions

View File

@@ -30,13 +30,13 @@ def load_settings() -> EnterpriseSettings:
)
except ConfigNotFoundError:
settings = EnterpriseSettings()
dynamic_config_store.store(KV_ENTERPRISE_SETTINGS_KEY, settings.dict())
dynamic_config_store.store(KV_ENTERPRISE_SETTINGS_KEY, settings.model_dump())
return settings
def store_settings(settings: EnterpriseSettings) -> None:
get_dynamic_config_store().store(KV_ENTERPRISE_SETTINGS_KEY, settings.dict())
get_dynamic_config_store().store(KV_ENTERPRISE_SETTINGS_KEY, settings.model_dump())
_CUSTOM_ANALYTICS_SECRET_KEY = os.environ.get("CUSTOM_ANALYTICS_SECRET_KEY")

View File

@@ -17,7 +17,7 @@ class StandardAnswerRequest(BaseModel):
class StandardAnswerResponse(BaseModel):
standard_answers: list[StandardAnswer] = []
standard_answers: list[StandardAnswer] = Field(default_factory=list)
class DocumentSearchRequest(ChunkContext):

View File

@@ -376,7 +376,7 @@ def get_query_history_as_csv(
# Create an in-memory text stream
stream = io.StringIO()
writer = csv.DictWriter(
stream, fieldnames=list(QuestionAnswerPairSnapshot.__fields__.keys())
stream, fieldnames=list(QuestionAnswerPairSnapshot.model_fields.keys())
)
writer.writeheader()
for row in question_answer_pairs: