// Image generation and editing helpers for the OpenAI Images API.
import * as plugins from './plugins.js';
/**
 * Options for generating images from a text prompt.
 *
 * Only a subset of fields is forwarded for each model (see `generateImage`):
 * - `style` is forwarded only for 'dall-e-3'.
 * - `background`, `outputFormat`, `outputCompression`, `moderation`, `stream`
 *   and `partialImages` are forwarded only for 'gpt-image-1'.
 */
export interface IImageGenerateOptions {
  /** OpenAI API key used to construct the client. */
  apiKey: string;
  /** Text description of the desired image. */
  prompt: string;
  /** Target model; `generateImage` defaults this to 'gpt-image-1'. */
  model?: 'gpt-image-1' | 'dall-e-3' | 'dall-e-2';
  /** Rendering quality. NOTE(review): 'low'/'medium'/'high' appear to be gpt-image-1 values and 'standard'/'hd' dall-e-3 values — confirm against the Images API docs. */
  quality?: 'low' | 'medium' | 'high' | 'standard' | 'hd' | 'auto';
  /** Output dimensions. NOTE(review): valid sizes differ per model; this union is the superset — confirm per model. */
  size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | '1792x1024' | '1024x1792' | 'auto';
  /** Visual style (forwarded only for 'dall-e-3'). */
  style?: 'vivid' | 'natural';
  /** Background handling (forwarded only for 'gpt-image-1'). */
  background?: 'transparent' | 'opaque' | 'auto';
  /** Image file format of the result (forwarded only for 'gpt-image-1'). */
  outputFormat?: 'png' | 'jpeg' | 'webp';
  /** Compression level for the output (forwarded only for 'gpt-image-1'). */
  outputCompression?: number;
  /** Content-moderation strictness (forwarded only for 'gpt-image-1'). */
  moderation?: 'low' | 'auto';
  /** Number of images to request; `generateImage` defaults this to 1. */
  n?: number;
  /** Whether to stream partial results (forwarded only for 'gpt-image-1'). */
  stream?: boolean;
  /** Number of partial images when streaming (forwarded only for 'gpt-image-1'). */
  partialImages?: number;
}
/**
 * Options for editing an existing image with a text prompt.
 *
 * Only a subset of fields is forwarded for each model (see `editImage`):
 * `background`, `outputFormat`, `outputCompression`, `stream` and
 * `partialImages` are forwarded only for 'gpt-image-1'.
 */
export interface IImageEditOptions {
  /** OpenAI API key used to construct the client. */
  apiKey: string;
  /** Source image bytes; uploaded as a PNG named 'image.png'. */
  image: Buffer;
  /** Text description of the desired edit. */
  prompt: string;
  /** Optional mask bytes restricting the edit region; uploaded as a PNG named 'mask.png'. */
  mask?: Buffer;
  /** Target model; `editImage` defaults this to 'gpt-image-1'. */
  model?: 'gpt-image-1' | 'dall-e-2';
  /** Rendering quality (forwarded only for 'gpt-image-1'). */
  quality?: 'low' | 'medium' | 'high' | 'standard' | 'auto';
  /** Output dimensions. NOTE(review): valid sizes differ per model; this union is the superset — confirm per model. */
  size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | 'auto';
  /** Background handling (forwarded only for 'gpt-image-1'). */
  background?: 'transparent' | 'opaque' | 'auto';
  /** Image file format of the result (forwarded only for 'gpt-image-1'). */
  outputFormat?: 'png' | 'jpeg' | 'webp';
  /** Compression level for the output (forwarded only for 'gpt-image-1'). */
  outputCompression?: number;
  /** Number of images to request; `editImage` defaults this to 1. */
  n?: number;
  /** Whether to stream partial results (forwarded only for 'gpt-image-1'). */
  stream?: boolean;
  /** Number of partial images when streaming (forwarded only for 'gpt-image-1'). */
  partialImages?: number;
}
/**
 * Normalized result of an image generation or edit call.
 *
 * Wraps the raw API response: each item carries whichever of base64 payload /
 * URL the API returned, and `metadata` echoes response-level fields.
 */
export interface IImageResponse {
  /** One entry per returned image. */
  images: Array<{
    /** Base64-encoded image data, when the API returned it. */
    b64_json?: string;
    /** Hosted URL of the image, when the API returned it. */
    url?: string;
    /** The model's rewritten prompt (mapped from the API's `revised_prompt`). */
    revisedPrompt?: string;
  }>;
  /** Response-level details copied from the API result. */
  metadata?: {
    /** Model actually used for the request. */
    model: string;
    /** Quality echoed by the response, if present. */
    quality?: string;
    /** Size echoed by the response, if present. */
    size?: string;
    /** Output format echoed by the response (mapped from `output_format`). */
    outputFormat?: string;
    /** Token usage, taken from `usage.total_tokens` when present. */
    tokensUsed?: number;
  };
}
export async function generateImage(options: IImageGenerateOptions): Promise<IImageResponse> {
|
|
const client = new plugins.OpenAI({ apiKey: options.apiKey });
|
|
const model = options.model || 'gpt-image-1';
|
|
|
|
const requestParams: Record<string, unknown> = {
|
|
model,
|
|
prompt: options.prompt,
|
|
n: options.n || 1,
|
|
};
|
|
|
|
if (model === 'gpt-image-1') {
|
|
if (options.quality) requestParams.quality = options.quality;
|
|
if (options.size) requestParams.size = options.size;
|
|
if (options.background) requestParams.background = options.background;
|
|
if (options.outputFormat) requestParams.output_format = options.outputFormat;
|
|
if (options.outputCompression !== undefined) requestParams.output_compression = options.outputCompression;
|
|
if (options.moderation) requestParams.moderation = options.moderation;
|
|
if (options.stream !== undefined) requestParams.stream = options.stream;
|
|
if (options.partialImages !== undefined) requestParams.partial_images = options.partialImages;
|
|
} else if (model === 'dall-e-3') {
|
|
if (options.quality) requestParams.quality = options.quality;
|
|
if (options.size) requestParams.size = options.size;
|
|
if (options.style) requestParams.style = options.style;
|
|
requestParams.response_format = 'b64_json';
|
|
} else if (model === 'dall-e-2') {
|
|
if (options.size) requestParams.size = options.size;
|
|
requestParams.response_format = 'b64_json';
|
|
}
|
|
|
|
const result: any = await client.images.generate(requestParams as any);
|
|
|
|
const images = (result.data || []).map((img: any) => ({
|
|
b64_json: img.b64_json,
|
|
url: img.url,
|
|
revisedPrompt: img.revised_prompt,
|
|
}));
|
|
|
|
return {
|
|
images,
|
|
metadata: {
|
|
model,
|
|
quality: result.quality,
|
|
size: result.size,
|
|
outputFormat: result.output_format,
|
|
tokensUsed: result.usage?.total_tokens,
|
|
},
|
|
};
|
|
}
|
|
|
|
export async function editImage(options: IImageEditOptions): Promise<IImageResponse> {
|
|
const client = new plugins.OpenAI({ apiKey: options.apiKey });
|
|
const model = options.model || 'gpt-image-1';
|
|
|
|
const imageFile = await plugins.toFile(options.image, 'image.png', { type: 'image/png' });
|
|
|
|
const requestParams: Record<string, unknown> = {
|
|
model,
|
|
image: imageFile,
|
|
prompt: options.prompt,
|
|
n: options.n || 1,
|
|
};
|
|
|
|
if (options.mask) {
|
|
requestParams.mask = await plugins.toFile(options.mask, 'mask.png', { type: 'image/png' });
|
|
}
|
|
|
|
if (model === 'gpt-image-1') {
|
|
if (options.quality) requestParams.quality = options.quality;
|
|
if (options.size) requestParams.size = options.size;
|
|
if (options.background) requestParams.background = options.background;
|
|
if (options.outputFormat) requestParams.output_format = options.outputFormat;
|
|
if (options.outputCompression !== undefined) requestParams.output_compression = options.outputCompression;
|
|
if (options.stream !== undefined) requestParams.stream = options.stream;
|
|
if (options.partialImages !== undefined) requestParams.partial_images = options.partialImages;
|
|
} else if (model === 'dall-e-2') {
|
|
if (options.size) requestParams.size = options.size;
|
|
requestParams.response_format = 'b64_json';
|
|
}
|
|
|
|
const result: any = await client.images.edit(requestParams as any);
|
|
|
|
const images = (result.data || []).map((img: any) => ({
|
|
b64_json: img.b64_json,
|
|
url: img.url,
|
|
revisedPrompt: img.revised_prompt,
|
|
}));
|
|
|
|
return {
|
|
images,
|
|
metadata: {
|
|
model,
|
|
quality: result.quality,
|
|
size: result.size,
|
|
outputFormat: result.output_format,
|
|
tokensUsed: result.usage?.total_tokens,
|
|
},
|
|
};
|
|
}
|