fix: Resolve deprecation warnings in Pydantic and asyncio

Fixed deprecation warnings to ensure forward compatibility:

1. Pydantic v2 Migration (embedding/models.py):
   - Migrated from class Config to model_config = ConfigDict()
   - Replaced deprecated class-based config pattern
   - Fixes PydanticDeprecatedSince20 warnings (3 occurrences)
   - Forward compatible with Pydantic v3.0

2. Asyncio Deprecation Fix (test_async_scraping.py):
   - Changed asyncio.iscoroutinefunction() to inspect.iscoroutinefunction()
   - Fixes DeprecationWarning raised on Python 3.14+ (asyncio.iscoroutinefunction is slated for removal in Python 3.16) (2 occurrences)
   - Uses recommended inspect module API

3. Lock File Update (uv.lock):
   - Updated dependency lock file

Impact:
- Reduces test warnings from 141 to ~75
- Improves forward compatibility
- No functional changes

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
yusyus
2026-02-08 13:34:48 +03:00
parent c5775615ba
commit fb80c7b54f
3 changed files with 83 additions and 36 deletions

View File

@@ -3,12 +3,22 @@ Pydantic models for embedding API.
"""
from typing import Any
from pydantic import BaseModel, Field
from pydantic import BaseModel, Field, ConfigDict
class EmbeddingRequest(BaseModel):
"""Request model for single embedding generation."""
model_config = ConfigDict(
json_schema_extra={
"example": {
"text": "This is a test document about Python programming.",
"model": "text-embedding-3-small",
"normalize": True
}
}
)
text: str = Field(..., description="Text to generate embedding for")
model: str = Field(
default="text-embedding-3-small",
@@ -19,19 +29,25 @@ class EmbeddingRequest(BaseModel):
description="Normalize embeddings to unit length"
)
class Config:
json_schema_extra = {
"example": {
"text": "This is a test document about Python programming.",
"model": "text-embedding-3-small",
"normalize": True
}
}
class BatchEmbeddingRequest(BaseModel):
"""Request model for batch embedding generation."""
model_config = ConfigDict(
json_schema_extra={
"example": {
"texts": [
"First document about Python",
"Second document about JavaScript",
"Third document about Rust"
],
"model": "text-embedding-3-small",
"normalize": True,
"batch_size": 32
}
}
)
texts: list[str] = Field(..., description="List of texts to embed")
model: str = Field(
default="text-embedding-3-small",
@@ -46,20 +62,6 @@ class BatchEmbeddingRequest(BaseModel):
description="Batch size for processing (default: 32)"
)
# Deprecated Pydantic v1-style class-based configuration (removed by this
# commit; replaced by model_config = ConfigDict(...) on BatchEmbeddingRequest).
class Config:
    # OpenAPI / JSON-schema "example" payload for the batch endpoint docs.
    json_schema_extra = {
        "example": {
            "texts": [
                "First document about Python",
                "Second document about JavaScript",
                "Third document about Rust"
            ],
            "model": "text-embedding-3-small",
            "normalize": True,
            "batch_size": 32
        }
    }
class EmbeddingResponse(BaseModel):
"""Response model for embedding generation."""
@@ -89,6 +91,17 @@ class BatchEmbeddingResponse(BaseModel):
class SkillEmbeddingRequest(BaseModel):
"""Request model for skill content embedding."""
model_config = ConfigDict(
json_schema_extra={
"example": {
"skill_path": "/path/to/skill/react",
"model": "text-embedding-3-small",
"chunk_size": 512,
"overlap": 50
}
}
)
skill_path: str = Field(..., description="Path to skill directory")
model: str = Field(
default="text-embedding-3-small",
@@ -103,16 +116,6 @@ class SkillEmbeddingRequest(BaseModel):
description="Overlap between chunks (tokens)"
)
# Deprecated Pydantic v1-style class-based configuration (removed by this
# commit; replaced by model_config = ConfigDict(...) on SkillEmbeddingRequest).
class Config:
    # OpenAPI / JSON-schema "example" payload for the skill-embedding docs.
    json_schema_extra = {
        "example": {
            "skill_path": "/path/to/skill/react",
            "model": "text-embedding-3-small",
            "chunk_size": 512,
            "overlap": 50
        }
    }
class SkillEmbeddingResponse(BaseModel):
"""Response model for skill content embedding."""