Python Client 🐍

Installation 📦

pip install flymyai

Quick Start 🚀

from flymyai import client, FlyMyAIPredictException

# Set your API key
apikey = "fly-***"

# Initialize the client
fma_client = client(apikey=apikey)

# Set the model name
model = "flymyai/hidream-i1-dev"

# Prepare the input data
payload = {
    "prompt": "A beautiful sunset over mountains, digital art",
    "negative_prompt": "blurry, low quality",
    "width": 512,
    "height": 512,
    "num_inference_steps": 30,
    "guidance_scale": 7.5,
    "seed": 42
}

try:
    # Make the prediction
    response = fma_client.predict(
        model=model,
        payload=payload
    )

    # Process the output data
    print(f"Generated image URL: {response.output_data['image_url']}")
except FlyMyAIPredictException as e:
    print(f"Error: {e}")

Advanced Usage 🎯

Synchronous Mode 🔄

Image Generation with HiDream 🖼️

from flymyai import client, FlyMyAIPredictException

apikey = "fly-***"
model = "flymyai/hidream-i1-dev"

payload = {
    "prompt": "A futuristic cityscape with flying cars, neon lights, cyberpunk style",
    "negative_prompt": "blurry, low quality, distorted",
    "width": 768,
    "height": 512,
    "num_inference_steps": 50,
    "guidance_scale": 8.5,
    "seed": 12345
}

fma_client = client(apikey=apikey)
try:
    response = fma_client.predict(model=model, payload=payload)
    print(f"Generated image URL: {response.output_data['image_url']}")
except FlyMyAIPredictException as e:
    print(f"Error: {e}")

Video Generation with wan2-img-to-video-lora 🎬

from flymyai import client, FlyMyAIPredictException

apikey = "fly-***"
model = "flymyai/wan2-img-to-video-lora"

payload = {
    "prompt": "A cinematic shot of a spaceship flying through a nebula",
    "negative_prompt": "blurry, low quality, distorted",
    "width": 512,
    "height": 512,
    "num_frames": 24,
    "num_inference_steps": 50,
    "guidance_scale": 7.5,
    "input_image": "https://example.com/input.jpg",
    "lora_url": "https://example.com/model.safetensors",
    "fps": 8,
    "acceleration_factor": 1
}

fma_client = client(apikey=apikey)
try:
    response = fma_client.predict(model=model, payload=payload)
    print(f"Generated video URL: {response.output_data['video_url']}")
except FlyMyAIPredictException as e:
    print(f"Error: {e}")

Asynchronous Mode ⚡

Basic Async Usage

import asyncio
from flymyai import client, FlyMyAIPredictException

async def generate_image():
    apikey = "fly-***"
    model = "flymyai/hidream-i1-dev"

    fma_client = client(apikey=apikey)

    try:
        response = await fma_client.apredict(
            model=model,
            payload={
                "prompt": "A magical forest",
                "width": 512,
                "height": 512
            }
        )
        print(f"Generated image URL: {response.output_data['image_url']}")
    except FlyMyAIPredictException as e:
        print(f"Error: {e}")

# Run the async function
asyncio.run(generate_image())

Parallel Async Requests

import asyncio
from flymyai import client, FlyMyAIPredictException

async def generate_multiple_images():
    apikey = "fly-***"
    model = "flymyai/hidream-i1-dev"

    fma_client = client(apikey=apikey)

    prompts = [
        "A magical forest",
        "A futuristic city",
        "An underwater scene"
    ]

    async def generate_single(prompt):
        try:
            response = await fma_client.apredict(
                model=model,
                payload={
                    "prompt": prompt,
                    "width": 512,
                    "height": 512
                }
            )
            return response.output_data['image_url']
        except FlyMyAIPredictException as e:
            print(f"Error with prompt '{prompt}': {e}")
            return None

    # Generate images in parallel
    tasks = [generate_single(prompt) for prompt in prompts]
    results = await asyncio.gather(*tasks)

    # Print results
    for prompt, url in zip(prompts, results):
        if url:
            print(f"Generated '{prompt}': {url}")

# Run the async function
asyncio.run(generate_multiple_images())
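
Limiting Concurrency

When sending many prompts at once, you may want to cap how many requests are in flight at the same time (see also the rate-limit note under Best Practices). The sketch below reuses the apredict call from the example above and throttles it with a standard asyncio.Semaphore; the limit of 4 is an arbitrary illustrative value, not an SDK requirement.

import asyncio
from flymyai import client, FlyMyAIPredictException

async def generate_many(prompts, max_concurrency=4):
    apikey = "fly-***"
    model = "flymyai/hidream-i1-dev"

    fma_client = client(apikey=apikey)
    # Cap the number of requests in flight at once (illustrative value)
    semaphore = asyncio.Semaphore(max_concurrency)

    async def generate_single(prompt):
        async with semaphore:
            try:
                response = await fma_client.apredict(
                    model=model,
                    payload={"prompt": prompt, "width": 512, "height": 512}
                )
                return response.output_data['image_url']
            except FlyMyAIPredictException as e:
                print(f"Error with prompt '{prompt}': {e}")
                return None

    # gather still schedules every task, but the semaphore throttles concurrency
    return await asyncio.gather(*(generate_single(p) for p in prompts))

asyncio.run(generate_many(["A magical forest", "A futuristic city", "An underwater scene"]))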

Streaming Mode 🌊

Basic Streaming

from flymyai import client, FlyMyAIPredictException

apikey = "fly-***"
model = "flymyai/hidream-i1-dev"

fma_client = client(apikey=apikey)

try:
    # Start streaming prediction
    for chunk in fma_client.predict_stream(
        model=model,
        payload={
            "prompt": "A beautiful landscape",
            "width": 512,
            "height": 512
        }
    ):
        # Process each chunk of data
        print(f"Received chunk: {chunk}")
except FlyMyAIPredictException as e:
    print(f"Error: {e}")

Async Streaming

import asyncio
from flymyai import client, FlyMyAIPredictException

async def stream_prediction():
    apikey = "fly-***"
    model = "flymyai/hidream-i1-dev"

    fma_client = client(apikey=apikey)

    try:
        # Start async streaming
        async for chunk in fma_client.apredict_stream(
            model=model,
            payload={
                "prompt": "A beautiful landscape",
                "width": 512,
                "height": 512
            }
        ):
            # Process each chunk of data
            print(f"Received chunk: {chunk}")
    except FlyMyAIPredictException as e:
        print(f"Error: {e}")

# Run the async streaming function
asyncio.run(stream_prediction())

Advanced Features 🛠️

Working with Files 📁

from flymyai import client, FlyMyAIPredictException
import pathlib

apikey = "fly-***"
model = "flymyai/resnet"

# Initialize client
fma_client = client(apikey=apikey)

# Prepare image file
image_path = pathlib.Path("/path/to/image.png")

try:
    # Make prediction with local file
    response = fma_client.predict(
        model=model,
        payload={"i_image": image_path}
    )
    print(f"Classification result: {response.output_data['o_495']}")
except FlyMyAIPredictException as e:
    print(f"Error: {e}")

Decoding Base64 Response 🔄

from flymyai import client, FlyMyAIPredictException
import base64
from PIL import Image
import io

apikey = "fly-***"
model = "flymyai/hidream-i1-dev"

fma_client = client(apikey=apikey)

try:
    # Make prediction
    response = fma_client.predict(
        model=model,
        payload={
            "prompt": "A beautiful sunset",
            "width": 512,
            "height": 512
        }
    )

    # Decode base64 image
    image_data = base64.b64decode(response.output_data['image_base64'])
    image = Image.open(io.BytesIO(image_data))

    # Save or process the image
    image.save("generated_image.png")
except FlyMyAIPredictException as e:
    print(f"Error: {e}")

Using Environment Variables 🔑

from flymyai import client, FlyMyAIPredictException
import os

# Get API key from environment variable
apikey = os.getenv("FLYMYAI_API_KEY")

# Initialize client
fma_client = client(apikey=apikey)

try:
    response = fma_client.predict(
        model="flymyai/hidream-i1-dev",
        payload={
            "prompt": "A beautiful landscape",
            "width": 512,
            "height": 512
        }
    )
    print(f"Generated image URL: {response.output_data['image_url']}")
except FlyMyAIPredictException as e:
    print(f"Error: {e}")

Error Handling 🛡️

The SDK provides detailed error handling through the FlyMyAIPredictException class:

try:
    response = fma_client.predict(model=model, payload=payload)
except FlyMyAIPredictException as e:
    print(f"Error code: {e.code}")
    print(f"Error message: {e.message}")
    print(f"Error details: {e.details}")

Best Practices 💡

  1. Always use try-except blocks to handle potential errors
  2. Set appropriate timeouts for long-running operations
  3. Validate input parameters before making API calls
  4. Use environment variables for API keys
  5. Implement proper error logging
  6. Handle base64 responses efficiently
  7. Use appropriate file formats for image processing
  8. Monitor API usage and rate limits
  9. Use async mode for better performance with multiple requests
  10. Implement proper error handling in streaming mode
  11. Use context managers for resource cleanup
  12. Implement retry logic for failed requests (see the sketch after this list)

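As a minimal sketch of points 4 and 12 above, the snippet below reads the API key from the FLYMYAI_API_KEY environment variable and retries a failed predict call with exponential backoff. The attempt count and delay are illustrative values, not SDK defaults.

import os
import time
from flymyai import client, FlyMyAIPredictException

def predict_with_retry(model, payload, max_attempts=3, base_delay=1.0):
    # max_attempts and base_delay are illustrative values, not SDK defaults
    fma_client = client(apikey=os.getenv("FLYMYAI_API_KEY"))

    for attempt in range(1, max_attempts + 1):
        try:
            return fma_client.predict(model=model, payload=payload)
        except FlyMyAIPredictException as e:
            print(f"Attempt {attempt} failed: {e}")
            if attempt == max_attempts:
                raise
            # Exponential backoff between attempts: 1s, 2s, 4s, ...
            time.sleep(base_delay * 2 ** (attempt - 1))

response = predict_with_retry(
    "flymyai/hidream-i1-dev",
    {"prompt": "A beautiful landscape", "width": 512, "height": 512}
)
print(f"Generated image URL: {response.output_data['image_url']}")
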
Additional Resources 📚