Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions litellm/images/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -344,6 +344,7 @@ def image_generation( # noqa: PLR0915
litellm.LlmProviders.GEMINI,
litellm.LlmProviders.FAL_AI,
litellm.LlmProviders.RUNWAYML,
litellm.LlmProviders.VOLCENGINE,
):
if image_generation_config is None:
raise ValueError(
Expand Down
4 changes: 3 additions & 1 deletion litellm/llms/volcengine/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""
Volcengine LLM Provider
Support for Volcengine (ByteDance) chat and embedding models
Support for Volcengine (ByteDance) chat, embedding, and image generation models
"""

from .chat.transformation import VolcEngineChatConfig
Expand All @@ -10,6 +10,7 @@
get_volcengine_headers,
)
from .embedding import VolcEngineEmbeddingConfig
from .image_generation import VolcEngineImageGenerationConfig

# For backward compatibility, keep the old class name
VolcEngineConfig = VolcEngineChatConfig
Expand All @@ -18,6 +19,7 @@
"VolcEngineChatConfig",
"VolcEngineConfig", # backward compatibility
"VolcEngineEmbeddingConfig",
"VolcEngineImageGenerationConfig",
"VolcEngineError",
"get_volcengine_base_url",
"get_volcengine_headers",
Expand Down
26 changes: 26 additions & 0 deletions litellm/llms/volcengine/image_generation/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
"""
Volcengine Image Generation Module
"""

from litellm.llms.base_llm.image_generation.transformation import (
BaseImageGenerationConfig,
)

from .transformation import VolcEngineImageGenerationConfig

__all__ = [
"VolcEngineImageGenerationConfig",
]


def get_volcengine_image_generation_config(model: str) -> BaseImageGenerationConfig:
    """
    Build the image-generation config for Volcengine models.

    Args:
        model: Model name (currently unused — a single config serves every
            Volcengine image model).

    Returns:
        A fresh ``VolcEngineImageGenerationConfig`` instance.
    """
    config = VolcEngineImageGenerationConfig()
    return config
267 changes: 267 additions & 0 deletions litellm/llms/volcengine/image_generation/transformation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,267 @@
"""
Volcengine Image Generation Transformation
Transforms OpenAI image generation requests to Volcengine format
"""

from typing import TYPE_CHECKING, Any, List, Optional

import httpx

from litellm.llms.base_llm.image_generation.transformation import (
BaseImageGenerationConfig,
)
from litellm.types.llms.openai import (
AllMessageValues,
OpenAIImageGenerationOptionalParams,
)
from litellm.types.utils import ImageObject, ImageResponse

from ..common_utils import get_volcengine_base_url, get_volcengine_headers

if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj

LiteLLMLoggingObj = _LiteLLMLoggingObj
else:
LiteLLMLoggingObj = Any


class VolcEngineImageGenerationConfig(BaseImageGenerationConfig):
    """
    Image-generation configuration for Volcengine (ByteDance) models.

    Translates between the OpenAI images API surface and the Volcengine
    Seedream endpoint.
    Reference: https://www.volcengine.com/docs/82379/1541523
    """

    def get_supported_openai_params(
        self, model: str
    ) -> List[OpenAIImageGenerationOptionalParams]:
        """
        Return the OpenAI image-generation parameters Volcengine accepts.

        Based on the Volcengine Seedream 4.0 API documentation.

        Args:
            model: Model name (the supported set is model-independent here).

        Returns:
            Names of the supported optional parameters.
        """
        return [
            "n",
            "response_format",
            "size",
            "user",
        ]

    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        """
        Map OpenAI image-generation parameters onto Volcengine's request fields.

        Args:
            non_default_params: Caller-supplied (non-default) OpenAI params.
            optional_params: Dict of provider params to populate in place.
            model: Model name.
            drop_params: When True, silently drop anything unsupported instead
                of raising.

        Returns:
            The updated ``optional_params`` dict.

        Raises:
            ValueError: On an unsupported parameter or ``response_format``
                value when ``drop_params`` is False.
        """
        supported_params = self.get_supported_openai_params(model)

        for name, value in non_default_params.items():
            # Never clobber a value already placed in optional_params.
            if name in optional_params:
                continue

            if name == "response_format":
                # Volcengine only understands the two OpenAI delivery formats.
                if value in ("url", "b64_json"):
                    optional_params["response_format"] = value
                elif not drop_params:
                    raise ValueError(
                        f"Unsupported response_format: {value}. Volcengine supports: url, b64_json"
                    )
                continue

            if name in supported_params:
                # "n", "size" ("WIDTHxHEIGHT", e.g. "1024x1024") and "user"
                # map one-to-one onto Volcengine's request fields.
                optional_params[name] = value
            elif not drop_params:
                raise ValueError(
                    f"Parameter {name} is not supported for model {model}. "
                    f"Supported parameters are {supported_params}. "
                    f"Set drop_params=True to drop unsupported parameters."
                )

        return optional_params

    def get_complete_url(
        self,
        api_base: Optional[str],
        api_key: Optional[str],
        model: str,
        optional_params: dict,
        litellm_params: dict,
        stream: Optional[bool] = None,
    ) -> str:
        """
        Build the full Volcengine image-generation endpoint URL.

        Only ``api_base`` participates in URL construction; the remaining
        arguments exist to satisfy the base-class signature.

        Returns:
            Absolute URL of the ``/images/generations`` endpoint.
        """
        root = get_volcengine_base_url(api_base)
        # Avoid doubling the /api/v3 prefix when the base URL already ends
        # with it.
        suffix = (
            "/images/generations"
            if root.endswith("/api/v3")
            else "/api/v3/images/generations"
        )
        return f"{root}{suffix}"

    def validate_environment(
        self,
        headers: dict,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:
        """
        Validate credentials and return the merged request headers.

        Raises:
            ValueError: When no ``api_key`` is provided.
        """
        if api_key is None:
            raise ValueError("api_key is required for Volcengine authentication")
        # Provider auth headers win over any caller-supplied duplicates.
        merged = dict(headers)
        merged.update(get_volcengine_headers(api_key))
        return merged

    def transform_image_generation_request(
        self,
        model: str,
        prompt: str,
        optional_params: dict,
        litellm_params: dict,
        headers: dict,
    ) -> dict:
        """
        Build the Volcengine request body from an OpenAI-style request.

        Args:
            model: Model name.
            prompt: Text prompt for image generation.
            optional_params: Mapped optional parameters.
            litellm_params: LiteLLM parameters (unused here).
            headers: Request headers (unused here).

        Returns:
            Dict request body in Volcengine's format.
        """
        payload: dict = {
            "model": model,
            "prompt": prompt,
        }

        # Forward recognised fields verbatim when present; "stream" and
        # "watermark" are Volcengine-specific extras the caller may pass.
        for field in ("n", "response_format", "size", "user", "stream", "watermark"):
            if field in optional_params:
                payload[field] = optional_params[field]

        return payload

    def transform_image_generation_response(
        self,
        model: str,
        raw_response: httpx.Response,
        model_response: ImageResponse,
        logging_obj: LiteLLMLoggingObj,
        request_data: dict,
        optional_params: dict,
        litellm_params: dict,
        encoding: Any,
        api_key: Optional[str] = None,
        json_mode: Optional[bool] = None,
    ) -> ImageResponse:
        """
        Convert a raw Volcengine HTTP response into a LiteLLM ``ImageResponse``.

        Args:
            model: Model name.
            raw_response: Raw HTTP response from Volcengine.
            model_response: ``ImageResponse`` object to populate.
            logging_obj: Logging object (unused here).
            request_data: Original request data (unused here).
            optional_params: Optional parameters used in the request.
            litellm_params: LiteLLM parameters.
            encoding: Encoding parameter (unused here).
            api_key: API key used for the call.
            json_mode: Whether JSON mode was requested.

        Returns:
            The populated ``ImageResponse``.
        """
        try:
            body = raw_response.json()
        except Exception as exc:
            raise self.get_error_class(
                error_message=f"Failed to parse Volcengine response as JSON: {str(exc)}",
                status_code=raw_response.status_code,
                headers=raw_response.headers,
            )

        # Make sure there is a list to append image objects to.
        if not model_response.data:
            model_response.data = []

        # Volcengine mirrors the OpenAI response shape: a "data" list whose
        # entries carry either a "url" or a "b64_json" payload.
        for entry in body.get("data", []):
            model_response.data.append(
                ImageObject(
                    url=entry.get("url", None),
                    b64_json=entry.get("b64_json", None),
                )
            )

        # NOTE(review): usage is forwarded as the raw provider dict — confirm
        # ImageResponse.usage accepts an untyped mapping.
        if "usage" in body:
            model_response.usage = body["usage"]

        if "created" in body:
            model_response.created = body["created"]

        return model_response
15 changes: 15 additions & 0 deletions litellm/model_prices_and_context_window_backup.json
Original file line number Diff line number Diff line change
Expand Up @@ -8623,6 +8623,21 @@
"output_cost_per_token": 0.0,
"output_vector_size": 2560
},
"doubao-seedream-4-0-250828": {
"input_cost_per_token": 0.0,
"litellm_provider": "volcengine",
"max_input_tokens": 4000,
"max_tokens": 4000,
"metadata": {
"notes": "Volcengine Doubao Seedream 4.0 text-to-image generation model. Supports image sizes from 512x512 to 2048x2048"
},
"mode": "image_generation",
"output_cost_per_image": 0.0,
"output_cost_per_token": 0.0,
"supported_endpoints": [
"/v1/images/generations"
]
},
"exa_ai/search": {
"litellm_provider": "exa_ai",
"mode": "search",
Expand Down
6 changes: 6 additions & 0 deletions litellm/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -7666,6 +7666,12 @@ def get_provider_image_generation_config(
)

return get_runwayml_image_generation_config(model)
elif LlmProviders.VOLCENGINE == provider:
from litellm.llms.volcengine.image_generation import (
get_volcengine_image_generation_config,
)

return get_volcengine_image_generation_config(model)
return None

@staticmethod
Expand Down
15 changes: 15 additions & 0 deletions model_prices_and_context_window.json
Original file line number Diff line number Diff line change
Expand Up @@ -8623,6 +8623,21 @@
"output_cost_per_token": 0.0,
"output_vector_size": 2560
},
"doubao-seedream-4-0-250828": {
"input_cost_per_token": 0.0,
"litellm_provider": "volcengine",
"max_input_tokens": 4000,
"max_tokens": 4000,
"metadata": {
"notes": "Volcengine Doubao Seedream 4.0 text-to-image generation model. Supports image sizes from 512x512 to 2048x2048"
},
"mode": "image_generation",
"output_cost_per_image": 0.0,
"output_cost_per_token": 0.0,
"supported_endpoints": [
"/v1/images/generations"
]
},
"exa_ai/search": {
"litellm_provider": "exa_ai",
"mode": "search",
Expand Down