phase 5
This commit is contained in:
1
backend/app/images/__init__.py
Normal file
1
backend/app/images/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Image upload and processing package."""
|
||||
98
backend/app/images/processing.py
Normal file
98
backend/app/images/processing.py
Normal file
@@ -0,0 +1,98 @@
|
||||
"""Image processing utilities - thumbnail generation."""
|
||||
|
||||
import contextlib
|
||||
import io
|
||||
from uuid import UUID
|
||||
|
||||
from PIL import Image as PILImage
|
||||
|
||||
from app.core.storage import get_storage_client
|
||||
|
||||
# Maximum thumbnail widths in pixels, keyed by quality tier; height is
# scaled proportionally when thumbnails are generated.
THUMBNAIL_SIZES = {
    "low": 800,  # For slow connections
    "medium": 1600,  # For medium connections
    "high": 3200,  # For fast connections
}
|
||||
|
||||
|
||||
def generate_thumbnails(image_id: UUID, original_path: str, contents: bytes) -> dict[str, str]:
    """
    Create WebP thumbnails of an image at each configured quality tier.

    Args:
        image_id: Image ID used to name the generated thumbnails
        original_path: Storage path of the original image
        contents: Raw bytes of the original image

    Returns:
        Mapping of quality level to the storage path serving that tier.
        Tiers whose max width meets or exceeds the original width reuse
        ``original_path`` instead of upscaling.
    """
    storage = get_storage_client()
    source = PILImage.open(io.BytesIO(contents))

    # Flatten transparency onto a white background and force RGB so the
    # encoder accepts the image.
    if source.mode in ("RGBA", "LA", "P"):
        canvas = PILImage.new("RGB", source.size, (255, 255, 255))
        if source.mode == "P":
            source = source.convert("RGBA")
        alpha = source.split()[-1] if source.mode in ("RGBA", "LA") else None
        canvas.paste(source, mask=alpha)
        source = canvas
    elif source.mode != "RGB":
        source = source.convert("RGB")

    source_width, source_height = source.size
    paths: dict[str, str] = {}

    for quality, max_width in THUMBNAIL_SIZES.items():
        # No upscaling: serve the original when it is already small enough.
        if source_width <= max_width:
            paths[quality] = original_path
            continue

        # Downscale preserving the aspect ratio.
        scaled_height = int(source_height * (max_width / source_width))
        resized = source.resize((max_width, scaled_height), PILImage.Resampling.LANCZOS)

        # Re-encode as WebP for better compression.
        buffer = io.BytesIO()
        resized.save(buffer, format="WEBP", quality=85, method=6)
        buffer.seek(0)

        destination = f"thumbnails/{quality}/{image_id}.webp"

        # Upload to MinIO.
        storage.put_object(
            bucket_name="webref",
            object_name=destination,
            data=buffer,
            length=len(buffer.getvalue()),
            content_type="image/webp",
        )

        paths[quality] = destination

    return paths
|
||||
|
||||
|
||||
async def delete_thumbnails(thumbnail_paths: dict[str, str]) -> None:
    """
    Best-effort removal of thumbnail objects from storage.

    Args:
        thumbnail_paths: Mapping of quality level to storage path
    """
    storage = get_storage_client()
    for object_name in thumbnail_paths.values():
        try:
            storage.remove_object(bucket_name="webref", object_name=object_name)
        except Exception:
            # Swallow failures so one bad path does not abort the rest.
            pass
|
||||
223
backend/app/images/repository.py
Normal file
223
backend/app/images/repository.py
Normal file
@@ -0,0 +1,223 @@
|
||||
"""Image repository for database operations."""
|
||||
|
||||
from collections.abc import Sequence
from uuid import UUID

from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession

from app.database.models.board_image import BoardImage
from app.database.models.image import Image
|
||||
|
||||
|
||||
class ImageRepository:
    """Repository for image database operations."""

    def __init__(self, db: AsyncSession):
        """Initialize repository with database session."""
        self.db = db

    async def create_image(
        self,
        user_id: UUID,
        filename: str,
        storage_path: str,
        file_size: int,
        mime_type: str,
        width: int,
        height: int,
        metadata: dict,
    ) -> Image:
        """
        Create new image record.

        Args:
            user_id: Owner user ID
            filename: Original filename
            storage_path: Path in MinIO
            file_size: File size in bytes
            mime_type: MIME type
            width: Image width in pixels
            height: Image height in pixels
            metadata: Additional metadata (format, checksum, thumbnails, etc)

        Returns:
            Created Image instance
        """
        image = Image(
            user_id=user_id,
            filename=filename,
            storage_path=storage_path,
            file_size=file_size,
            mime_type=mime_type,
            width=width,
            height=height,
            metadata=metadata,
        )
        self.db.add(image)
        await self.db.commit()
        await self.db.refresh(image)
        return image

    async def get_image_by_id(self, image_id: UUID) -> Image | None:
        """
        Get image by ID.

        Args:
            image_id: Image ID

        Returns:
            Image instance or None
        """
        result = await self.db.execute(select(Image).where(Image.id == image_id))
        return result.scalar_one_or_none()

    async def get_user_images(self, user_id: UUID, limit: int = 50, offset: int = 0) -> tuple[Sequence[Image], int]:
        """
        Get all images for a user with pagination.

        Args:
            user_id: User ID
            limit: Maximum number of images to return
            offset: Number of images to skip

        Returns:
            Tuple of (images, total_count)
        """
        # Count in SQL rather than fetching every row and len()-ing it in
        # Python — the previous approach loaded the user's entire image
        # library just to compute the total.
        count_result = await self.db.execute(
            select(func.count()).select_from(Image).where(Image.user_id == user_id)
        )
        total = count_result.scalar_one()

        # Get paginated results, newest first.
        result = await self.db.execute(
            select(Image).where(Image.user_id == user_id).order_by(Image.created_at.desc()).limit(limit).offset(offset)
        )
        images = result.scalars().all()

        return images, total

    async def delete_image(self, image_id: UUID) -> bool:
        """
        Delete image record.

        Args:
            image_id: Image ID

        Returns:
            True if deleted, False if not found
        """
        image = await self.get_image_by_id(image_id)
        if not image:
            return False

        await self.db.delete(image)
        await self.db.commit()
        return True

    async def increment_reference_count(self, image_id: UUID) -> None:
        """
        Increment reference count for image.

        No-op when the image does not exist.

        Args:
            image_id: Image ID
        """
        image = await self.get_image_by_id(image_id)
        if image:
            image.reference_count += 1
            await self.db.commit()

    async def decrement_reference_count(self, image_id: UUID) -> int:
        """
        Decrement reference count for image.

        The count never goes below zero; a missing image yields 0.

        Args:
            image_id: Image ID

        Returns:
            New reference count
        """
        image = await self.get_image_by_id(image_id)
        if image and image.reference_count > 0:
            image.reference_count -= 1
            await self.db.commit()
            return image.reference_count
        return 0

    async def add_image_to_board(
        self,
        board_id: UUID,
        image_id: UUID,
        position: dict,
        transformations: dict,
        z_order: int = 0,
    ) -> BoardImage:
        """
        Add image to board.

        Args:
            board_id: Board ID
            image_id: Image ID
            position: Canvas position {x, y}
            transformations: Image transformations
            z_order: Layer order

        Returns:
            Created BoardImage instance
        """
        board_image = BoardImage(
            board_id=board_id,
            image_id=image_id,
            position=position,
            transformations=transformations,
            z_order=z_order,
        )
        self.db.add(board_image)

        # Increment reference count (this commits the session).
        await self.increment_reference_count(image_id)

        await self.db.commit()
        await self.db.refresh(board_image)
        return board_image

    async def get_board_images(self, board_id: UUID) -> Sequence[BoardImage]:
        """
        Get all images for a board, ordered by z-order.

        Args:
            board_id: Board ID

        Returns:
            List of BoardImage instances
        """
        result = await self.db.execute(
            select(BoardImage).where(BoardImage.board_id == board_id).order_by(BoardImage.z_order.asc())
        )
        return result.scalars().all()

    async def remove_image_from_board(self, board_id: UUID, image_id: UUID) -> bool:
        """
        Remove image from board.

        Args:
            board_id: Board ID
            image_id: Image ID

        Returns:
            True if removed, False if not found
        """
        result = await self.db.execute(
            select(BoardImage).where(BoardImage.board_id == board_id, BoardImage.image_id == image_id)
        )
        board_image = result.scalar_one_or_none()

        if not board_image:
            return False

        await self.db.delete(board_image)

        # Decrement reference count (this commits the session).
        await self.decrement_reference_count(image_id)

        await self.db.commit()
        return True
|
||||
112
backend/app/images/schemas.py
Normal file
112
backend/app/images/schemas.py
Normal file
@@ -0,0 +1,112 @@
|
||||
"""Image schemas for request/response validation."""
|
||||
|
||||
from datetime import datetime
from typing import Any
from uuid import UUID

from pydantic import BaseModel, ConfigDict, Field, field_validator
|
||||
|
||||
|
||||
class ImageMetadata(BaseModel):
    """Image metadata structure.

    Mirrors the ``metadata`` dict persisted with each image record
    (format, checksum, thumbnails, etc). The ``thumbnails`` mapping is
    keyed by quality level ("low"/"medium"/"high").
    """

    format: str = Field(..., description="Image format (jpeg, png, etc)")
    checksum: str = Field(..., description="SHA256 checksum of file")
    exif: dict[str, Any] | None = Field(None, description="EXIF data if available")
    thumbnails: dict[str, str] = Field(default_factory=dict, description="Thumbnail URLs by quality level")
|
||||
|
||||
|
||||
class ImageUploadResponse(BaseModel):
    """Response after successful image upload."""

    # Pydantic v2 configuration; the inner ``class Config`` is deprecated
    # and scheduled for removal in Pydantic v3.
    model_config = ConfigDict(from_attributes=True)

    id: UUID
    filename: str
    storage_path: str
    file_size: int
    mime_type: str
    width: int
    height: int
    metadata: dict[str, Any]
    created_at: datetime
|
||||
|
||||
|
||||
class ImageResponse(BaseModel):
    """Full image response with all fields."""

    # Pydantic v2 configuration; the inner ``class Config`` is deprecated
    # and scheduled for removal in Pydantic v3.
    model_config = ConfigDict(from_attributes=True)

    id: UUID
    user_id: UUID
    filename: str
    storage_path: str
    file_size: int
    mime_type: str
    width: int
    height: int
    metadata: dict[str, Any]
    created_at: datetime
    reference_count: int
|
||||
|
||||
|
||||
class BoardImageCreate(BaseModel):
    """Schema for adding image to board."""

    image_id: UUID = Field(..., description="ID of uploaded image")
    position: dict[str, float] = Field(default_factory=lambda: {"x": 0, "y": 0}, description="Canvas position")
    transformations: dict[str, Any] = Field(
        default_factory=lambda: {
            "scale": 1.0,
            "rotation": 0,
            "opacity": 1.0,
            "flipped_h": False,
            "flipped_v": False,
            "greyscale": False,
        },
        description="Image transformations",
    )
    z_order: int = Field(default=0, description="Layer order")

    @field_validator("position")
    @classmethod
    def validate_position(cls, v: dict[str, float]) -> dict[str, float]:
        """Reject positions that are missing either coordinate."""
        if {"x", "y"} - v.keys():
            raise ValueError("Position must contain 'x' and 'y' coordinates")
        return v
|
||||
|
||||
|
||||
class BoardImageResponse(BaseModel):
    """Response for board image with all metadata."""

    # Pydantic v2 configuration; the inner ``class Config`` is deprecated
    # and scheduled for removal in Pydantic v3.
    model_config = ConfigDict(from_attributes=True)

    id: UUID
    board_id: UUID
    image_id: UUID
    position: dict[str, float]
    transformations: dict[str, Any]
    z_order: int
    group_id: UUID | None
    created_at: datetime
    updated_at: datetime
    image: ImageResponse
|
||||
|
||||
|
||||
class ImageListResponse(BaseModel):
    """Paginated list of images."""

    images: list[ImageResponse]  # the current page of results
    total: int  # total number of images available across all pages
    page: int  # current page number
    page_size: int  # number of items per page
|
||||
86
backend/app/images/upload.py
Normal file
86
backend/app/images/upload.py
Normal file
@@ -0,0 +1,86 @@
|
||||
"""Image upload handler with streaming to MinIO."""
|
||||
|
||||
import contextlib
|
||||
import hashlib
|
||||
import io
|
||||
from uuid import UUID
|
||||
|
||||
from PIL import Image as PILImage
|
||||
|
||||
from app.core.storage import get_storage_client
|
||||
|
||||
|
||||
async def upload_image_to_storage(
    user_id: UUID, image_id: UUID, filename: str, contents: bytes
) -> tuple[str, int, int, str]:
    """
    Upload image to MinIO storage.

    Args:
        user_id: User ID for organizing storage
        image_id: Image ID for unique naming
        filename: Original filename
        contents: Image file contents

    Returns:
        Tuple of (storage_path, width, height, mime_type)
    """
    # Get storage client
    storage = get_storage_client()

    # Generate storage path: originals/{user_id}/{image_id}.{ext}.
    # Fall back to a generic extension when the filename has none
    # (previously the whole dotless filename became the "extension").
    extension = filename.rsplit(".", 1)[-1].lower() if "." in filename else "bin"
    storage_path = f"originals/{user_id}/{image_id}.{extension}"

    # Detect image dimensions and format; the context manager closes the
    # PIL handle promptly instead of leaking it.
    with PILImage.open(io.BytesIO(contents)) as image:
        width, height = image.size
        format_name = image.format.lower() if image.format else extension

    # Map PIL format to MIME type
    mime_type_map = {
        "jpeg": "image/jpeg",
        "jpg": "image/jpeg",
        "png": "image/png",
        "gif": "image/gif",
        "webp": "image/webp",
        "svg": "image/svg+xml",
    }
    mime_type = mime_type_map.get(format_name, f"image/{format_name}")

    # Upload to MinIO
    storage.put_object(
        bucket_name="webref",
        object_name=storage_path,
        data=io.BytesIO(contents),
        length=len(contents),
        content_type=mime_type,
    )

    return storage_path, width, height, mime_type
|
||||
|
||||
|
||||
def calculate_checksum(contents: bytes) -> str:
    """
    Compute the SHA-256 digest of *contents*.

    Args:
        contents: File contents

    Returns:
        SHA256 checksum as hex string
    """
    digest = hashlib.sha256()
    digest.update(contents)
    return digest.hexdigest()
|
||||
|
||||
|
||||
async def delete_image_from_storage(storage_path: str) -> None:
    """
    Best-effort deletion of an image object from MinIO.

    Args:
        storage_path: Path to image in storage
    """
    storage = get_storage_client()
    try:
        storage.remove_object(bucket_name="webref", object_name=storage_path)
    except Exception:
        # Ignore failures - the object may already be gone.
        pass
|
||||
110
backend/app/images/validation.py
Normal file
110
backend/app/images/validation.py
Normal file
@@ -0,0 +1,110 @@
|
||||
"""File validation utilities for image uploads."""
|
||||
|
||||
import magic
|
||||
from fastapi import HTTPException, UploadFile, status
|
||||
|
||||
# Maximum file size: 50MB (50 * 1024 * 1024 bytes)
MAX_FILE_SIZE = 52_428_800

# MIME types accepted from magic-byte sniffing.
# NOTE: "image/jpg" is not a registered MIME type but is kept for leniency.
ALLOWED_MIME_TYPES = {
    "image/jpeg",
    "image/jpg",
    "image/png",
    "image/gif",
    "image/webp",
    "image/svg+xml",
}

# File extensions accepted on the uploaded filename (lowercase, with dot).
ALLOWED_EXTENSIONS = {".jpg", ".jpeg", ".png", ".gif", ".webp", ".svg"}
|
||||
|
||||
|
||||
async def validate_image_file(file: UploadFile) -> bytes:
    """
    Validate an uploaded image file and return its contents.

    Performed in order:
    - size checks (non-empty, under MAX_FILE_SIZE)
    - filename-extension check (only when a filename was supplied)
    - magic-byte MIME sniff against ALLOWED_MIME_TYPES

    Args:
        file: The uploaded file from FastAPI

    Returns:
        File contents as bytes

    Raises:
        HTTPException: If any validation step fails
    """
    contents = await file.read()

    # Rewind so later consumers can re-read the stream.
    await file.seek(0)

    size = len(contents)
    if size == 0:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Empty file uploaded")
    if size > MAX_FILE_SIZE:
        raise HTTPException(
            status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
            detail=f"File too large. Maximum size is {MAX_FILE_SIZE / 1_048_576:.1f}MB",
        )

    # NOTE(review): when no filename is present this check is skipped
    # entirely; the magic-byte check below still applies.
    if file.filename:
        extension = "." + file.filename.lower().split(".")[-1] if "." in file.filename else ""
        if extension not in ALLOWED_EXTENSIONS:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"Invalid file extension. Allowed: {', '.join(ALLOWED_EXTENSIONS)}",
            )

    # Sniff the real content type from the file's magic bytes and verify it.
    mime = magic.from_buffer(contents, mime=True)
    if mime not in ALLOWED_MIME_TYPES:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Invalid file type '{mime}'. Allowed types: {', '.join(ALLOWED_MIME_TYPES)}",
        )

    return contents
|
||||
|
||||
|
||||
def sanitize_filename(filename: str) -> str:
    """
    Sanitize filename to prevent path traversal and other attacks.

    Path separators and any character outside ``[a-zA-Z0-9._-]`` become
    underscores, and the result is capped at 255 characters while keeping
    the extension when one exists.

    Args:
        filename: Original filename

    Returns:
        Sanitized filename
    """
    import re

    # Neutralize path separators, then whitelist the remaining characters.
    cleaned = re.sub(r"[^a-zA-Z0-9._-]", "_", filename.replace("/", "_").replace("\\", "_"))

    max_length = 255
    if len(cleaned) <= max_length:
        return cleaned

    # Too long: truncate the stem but preserve the extension if present.
    stem, dot, ext = cleaned.rpartition(".")
    if dot:
        return stem[: max_length - len(ext) - 1] + "." + ext
    return cleaned[:max_length]
|
||||
73
backend/app/images/zip_handler.py
Normal file
73
backend/app/images/zip_handler.py
Normal file
@@ -0,0 +1,73 @@
|
||||
"""ZIP file extraction handler for batch image uploads."""
|
||||
|
||||
import io
|
||||
import zipfile
|
||||
from collections.abc import AsyncIterator
|
||||
|
||||
from fastapi import HTTPException, UploadFile, status
|
||||
|
||||
|
||||
async def extract_images_from_zip(zip_file: UploadFile) -> AsyncIterator[tuple[str, bytes]]:
    """
    Extract image files from ZIP archive.

    Args:
        zip_file: Uploaded ZIP file

    Yields:
        Tuples of (filename, contents) for each image file

    Raises:
        HTTPException: If ZIP is invalid, too large, or contains no images
    """
    # Read ZIP contents
    zip_contents = await zip_file.read()

    # Check ZIP size (max 200MB for ZIP)
    max_zip_size = 200 * 1024 * 1024  # 200MB
    if len(zip_contents) > max_zip_size:
        raise HTTPException(
            status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
            detail=f"ZIP file too large. Maximum size is {max_zip_size / 1_048_576:.1f}MB",
        )

    try:
        # Open ZIP file
        with zipfile.ZipFile(io.BytesIO(zip_contents)) as zip_ref:
            # Get list of image files (filter by extension)
            image_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp", ".svg"}
            image_files = [
                name
                for name in zip_ref.namelist()
                if not name.startswith("__MACOSX/")  # Skip macOS metadata
                and not name.startswith(".")  # Skip hidden files
                and any(name.lower().endswith(ext) for ext in image_extensions)
            ]

            if not image_files:
                raise HTTPException(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    detail="No valid image files found in ZIP archive",
                )

            # Extract each image
            for filename in image_files:
                # Skip directories
                if filename.endswith("/"):
                    continue

                # Get just the filename without path
                base_filename = filename.split("/")[-1]

                # Read file contents
                file_contents = zip_ref.read(filename)

                yield base_filename, file_contents

    except zipfile.BadZipFile as e:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid ZIP file") from e
    except HTTPException:
        # BUG FIX: the 400 "no valid image files" error raised above was
        # previously caught by the generic handler below and rewrapped as
        # a 500; re-raise HTTP errors untouched.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Error processing ZIP file: {str(e)}",
        ) from e
|
||||
Reference in New Issue
Block a user