Python (OpenAI SDK)

Use the official OpenAI Python SDK with Eden AI to access 200+ AI models through a familiar interface.

Overview

The OpenAI Python SDK is fully compatible with Eden AI’s V3 API. Simply point the SDK to Eden AI’s endpoint and you can access models from OpenAI, Anthropic, Google, Cohere, Meta, and more.

Installation

Install the OpenAI Python SDK:
pip install openai

Quick Start

Configure the OpenAI client to use Eden AI:
from openai import OpenAI

# Initialize the client with the Eden AI endpoint
client = OpenAI(
    api_key="YOUR_EDEN_AI_API_KEY",  # Get from https://app.edenai.run
    base_url="https://api.edenai.run/v3/llm"
)

# Make a request
response = client.chat.completions.create(
    model="openai/gpt-4",
    messages=[
        {"role": "user", "content": "Hello! How are you?"}
    ]
)

# Print the response
print(response.choices[0].message.content)
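
The response object also reports token usage, which is handy for tracking cost. A minimal sketch, assuming Eden AI populates the standard usage field that the OpenAI SDK exposes:
# Inspect token usage on the same response object
usage = response.usage
if usage is not None:  # providers may omit usage data
    print(f"Prompt tokens: {usage.prompt_tokens}")
    print(f"Completion tokens: {usage.completion_tokens}")
    print(f"Total tokens: {usage.total_tokens}")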

Available Models

Access models from multiple providers using the provider/model format:

OpenAI

  • openai/gpt-4
  • openai/gpt-4-turbo
  • openai/gpt-4o
  • openai/gpt-3.5-turbo

Anthropic

  • anthropic/claude-sonnet-4-5
  • anthropic/claude-opus-4-5
  • anthropic/claude-haiku-4-5

Google

  • google/gemini-pro
  • google/gemini-1.5-pro
  • google/gemini-1.5-flash

Cohere

  • cohere/command-r-plus
  • cohere/command-r

Meta

  • meta/llama-3-70b
  • meta/llama-3-8b
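
Because every model sits behind the same interface, switching providers is just a change to the model string. A minimal sketch, reusing the client from Quick Start (the prompt and model choices are illustrative):
# Ask the same question to two providers by changing only the model ID
for model in ["openai/gpt-4o", "anthropic/claude-haiku-4-5"]:
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": "Summarize HTTP/2 in one sentence."}]
    )
    print(f"{model}: {response.choices[0].message.content}")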

Multi-Turn Conversations

Build conversational applications with message history:
from openai import OpenAI

client = OpenAI(
    api_key="YOUR_EDEN_AI_API_KEY",
    base_url="https://api.edenai.run/v3/llm"
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the capital of France?"}
]

# First interaction
response = client.chat.completions.create(
    model="anthropic/claude-sonnet-4-5",
    messages=messages
)

assistant_response = response.choices[0].message.content
print(assistant_response)

# Add assistant response to history
messages.append({"role": "assistant", "content": assistant_response})

# Continue conversation
messages.append({"role": "user", "content": "What's the population?"})

response = client.chat.completions.create(
    model="anthropic/claude-sonnet-4-5",
    messages=messages
)

print(response.choices[0].message.content)
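
Because history grows with every turn, long conversations can eventually exceed a model's context window. One common remedy is to cap the history before each request; a minimal sketch, where the cap of 20 messages is an arbitrary choice:
# Keep the system prompt plus only the most recent messages (cap is arbitrary)
def trim_history(messages, max_messages=20):
    system = [m for m in messages if m["role"] == "system"]
    rest = [m for m in messages if m["role"] != "system"]
    return system + rest[-max_messages:]

messages = trim_history(messages)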

Advanced Parameters

Control model behavior with standard OpenAI parameters:
from openai import OpenAI

client = OpenAI(
    api_key="YOUR_EDEN_AI_API_KEY",
    base_url="https://api.edenai.run/v3/llm"
)

response = client.chat.completions.create(
    model="openai/gpt-4",
    messages=[
        {"role": "user", "content": "Write a creative story about AI."}
    ],
    temperature=0.9,        # Higher = more creative (0-2)
    max_tokens=500,         # Limit response length
    top_p=1.0,              # Nucleus sampling
    frequency_penalty=0.0,  # Penalize repetition (-2 to 2)
    presence_penalty=0.0    # Penalize topic repetition (-2 to 2)
)

print(response.choices[0].message.content)

Vision Capabilities

Send images to vision-capable models:
import requests
from openai import OpenAI

client = OpenAI(
    api_key="YOUR_EDEN_AI_API_KEY",
    base_url="https://api.edenai.run/v3/llm"
)

# First, upload the image to get a file_id
with open("image.jpg", "rb") as image_file:
    upload_response = requests.post(
        "https://api.edenai.run/v3/upload",
        headers={"Authorization": "Bearer YOUR_EDEN_AI_API_KEY"},
        files={"file": image_file}
    )
file_id = upload_response.json()["file_id"]

# Use the file_id in a chat message
response = client.chat.completions.create(
    model="openai/gpt-4o",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},
                {"type": "file", "file": {"file_id": file_id}}
            ]
        }
    ]
)

print(response.choices[0].message.content)

Async Support

Use async/await for concurrent requests:
from openai import AsyncOpenAI
import asyncio

async def main():
    client = AsyncOpenAI(
        api_key="YOUR_EDEN_AI_API_KEY",
        base_url="https://api.edenai.run/v3/llm"
    )

    response = await client.chat.completions.create(
        model="openai/gpt-4",
        messages=[
            {"role": "user", "content": "Hello!"}
        ]
    )

    print(response.choices[0].message.content)

asyncio.run(main())
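
The example above awaits a single call, but the async client pays off when several requests run concurrently. A minimal sketch using asyncio.gather (the ask helper, prompts, and model choices are illustrative):
from openai import AsyncOpenAI
import asyncio

async def ask(client, model, prompt):
    # One request; awaiting inside gather lets calls overlap
    response = await client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}]
    )
    return response.choices[0].message.content

async def main():
    client = AsyncOpenAI(
        api_key="YOUR_EDEN_AI_API_KEY",
        base_url="https://api.edenai.run/v3/llm"
    )
    # Issue both requests concurrently instead of one after the other
    answers = await asyncio.gather(
        ask(client, "openai/gpt-4", "Name one prime number."),
        ask(client, "anthropic/claude-haiku-4-5", "Name one even number.")
    )
    for answer in answers:
        print(answer)

asyncio.run(main())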

Error Handling

Handle API errors gracefully:
from openai import OpenAI
import openai

client = OpenAI(
    api_key="YOUR_EDEN_AI_API_KEY",
    base_url="https://api.edenai.run/v3/llm"
)

try:
    response = client.chat.completions.create(
        model="openai/gpt-4",
        messages=[{"role": "user", "content": "Hello!"}]
    )

    print(response.choices[0].message.content)

except openai.AuthenticationError as e:
    print(f"Authentication failed: {e}")
except openai.RateLimitError as e:
    print(f"Rate limit exceeded: {e}")
except openai.APIError as e:
    print(f"API error: {e}")
except Exception as e:
    print(f"Unexpected error: {e}")

Complete Example

A full example with conversation management:
from openai import OpenAI
import sys

def chat_with_ai():
    client = OpenAI(
        api_key="YOUR_EDEN_AI_API_KEY",
        base_url="https://api.edenai.run/v3/llm"
    )

    messages = [
        {"role": "system", "content": "You are a helpful assistant."}
    ]

    print("Chat with AI (type 'quit' to exit)")
    print("-" * 50)

    while True:
        # Get user input
        user_input = input("\nYou: ").strip()

        if user_input.lower() == 'quit':
            break

        if not user_input:
            continue

        # Add user message
        messages.append({"role": "user", "content": user_input})

        # Get AI response
        response = client.chat.completions.create(
            model="anthropic/claude-sonnet-4-5",
            messages=messages,
            temperature=0.7
        )

        assistant_response = response.choices[0].message.content
        print(f"\nAssistant: {assistant_response}")

        # Add assistant response to history
        messages.append({"role": "assistant", "content": assistant_response})

if __name__ == "__main__":
    chat_with_ai()

List Available Models

Discover available models programmatically:
import requests

response = requests.get(
    "https://api.edenai.run/v3/llm/models",
    headers={"Authorization": "Bearer YOUR_EDEN_AI_API_KEY"}
)

models = response.json()

# Print models by provider
providers = {}
for model in models["data"]:
    provider = model["owned_by"]
    if provider not in providers:
        providers[provider] = []
    providers[provider].append(model["id"])

for provider, model_list in providers.items():
    print(f"\n{provider}:")
    for model_id in model_list:
        print(f"  - {model_id}")

Environment Variables

Store your API key securely using environment variables:
import os
from openai import OpenAI

client = OpenAI(
    api_key=os.getenv("EDEN_AI_API_KEY"),
    base_url="https://api.edenai.run/v3/llm"
)

Use with python-dotenv:
from dotenv import load_dotenv
import os
from openai import OpenAI

load_dotenv()

client = OpenAI(
    api_key=os.getenv("EDEN_AI_API_KEY"),
    base_url="https://api.edenai.run/v3/llm"
)
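
The SDK also reads OPENAI_API_KEY and OPENAI_BASE_URL from the environment on its own, so if you export your Eden AI key and endpoint under those names, the constructor needs no arguments:
# Assumes OPENAI_API_KEY holds your Eden AI key and
# OPENAI_BASE_URL is set to https://api.edenai.run/v3/llm
client = OpenAI()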

Troubleshooting

Authentication Errors

Ensure your API key is correct. The OpenAI SDK adds the Bearer prefix for you; include it yourself only when making raw HTTP requests:
headers = {"Authorization": f"Bearer {api_key}"}

Rate Limiting

If you hit rate limits, implement exponential backoff:
import time
from openai import OpenAI, RateLimitError

client = OpenAI(
    api_key="YOUR_EDEN_AI_API_KEY",
    base_url="https://api.edenai.run/v3/llm"
)

def create_completion_with_retry(messages, max_retries=3):
    for attempt in range(max_retries):
        try:
            return client.chat.completions.create(
                model="openai/gpt-4",
                messages=messages
            )
        except RateLimitError:
            if attempt < max_retries - 1:
                wait_time = 2 ** attempt  # Exponential backoff
                print(f"Rate limited. Waiting {wait_time}s...")
                time.sleep(wait_time)
            else:
                raise
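
Using the helper is the same as calling create directly; only the retry behavior differs:
# Example call through the retry wrapper defined above
response = create_completion_with_retry(
    [{"role": "user", "content": "Hello!"}]
)
print(response.choices[0].message.content)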

Next Steps