Advanced OpenAI SDK: Function Calling and Custom GPTs

OpenAI SDK Avanzado

El SDK de OpenAI ofrece capacidades avanzadas que van más allá del chat básico, incluyendo function calling, custom GPTs, e integración con herramientas externas.

Function Calling Avanzado

Definición de Funciones

// JSON-Schema function definitions exposed to the model for function calling.
// Each entry declares a callable tool: its name, a human-readable description,
// and a JSON Schema describing its expected arguments.
const searchDatabaseFunction = {
    name: "search_database",
    description: "Buscar información en la base de datos",
    parameters: {
        type: "object",
        properties: {
            query: {
                type: "string",
                description: "Consulta de búsqueda"
            },
            filters: {
                type: "object",
                properties: {
                    category: { type: "string" },
                    date_range: { type: "string" }
                }
            }
        },
        required: ["query"]
    }
};

const sendEmailFunction = {
    name: "send_email",
    description: "Enviar email a un destinatario",
    parameters: {
        type: "object",
        properties: {
            to: { type: "string" },
            subject: { type: "string" },
            body: { type: "string" }
        },
        required: ["to", "subject", "body"]
    }
};

const functions = [searchDatabaseFunction, sendEmailFunction];

Implementación de Function Calling

/**
 * Runs one chat completion with function calling enabled ("auto").
 * If the model requests a function, executes it via `executeFunction`,
 * feeds the result back in a follow-up completion, and returns the
 * model's final text. Otherwise returns the direct answer.
 *
 * @param {object} message - Single chat message (e.g. { role, content }).
 * @param {Array<object>} functions - Function definitions the model may call.
 * @returns {Promise<string>} Final assistant message content.
 */
async function handleFunctionCall(message, functions) {
    const completion = await openai.chat.completions.create({
        model: "gpt-4",
        messages: [message],
        functions,
        function_call: "auto"
    });

    const reply = completion.choices[0].message;

    // No function requested: the model answered directly.
    if (!reply.function_call) {
        return reply.content;
    }

    const fnName = reply.function_call.name;
    const fnArgs = JSON.parse(reply.function_call.arguments);

    // Execute the requested function locally.
    const fnResult = await executeFunction(fnName, fnArgs);

    // Send the function result back so the model can compose a final answer.
    const followUp = await openai.chat.completions.create({
        model: "gpt-4",
        messages: [
            message,
            reply,
            {
                role: "function",
                name: fnName,
                content: JSON.stringify(fnResult)
            }
        ]
    });

    return followUp.choices[0].message.content;
}

Custom GPTs

Creación de GPTs Personalizados

/**
 * Wrapper around the OpenAI Assistants (beta) API for building custom GPTs
 * backed by a vector-store knowledge base.
 */
class CustomGPT {
    /**
     * @param {object} openai - Initialized OpenAI SDK client.
     * @param {string} instructions - System instructions for the assistant.
     * @param {string} knowledge_base - Vector store id used by file_search.
     */
    constructor(openai, instructions, knowledge_base) {
        this.openai = openai;
        this.instructions = instructions;
        this.knowledge_base = knowledge_base;
    }

    /**
     * Creates an assistant with code interpreter and file search tools.
     * @param {string} name - Assistant display name.
     * @param {string} description - Assistant description.
     * @returns {Promise<object>} The created assistant object.
     */
    async createGPT(name, description) {
        return this.openai.beta.assistants.create({
            name,
            description,
            instructions: this.instructions,
            model: "gpt-4-turbo-preview",
            tools: [
                { type: "code_interpreter" },
                { type: "file_search" }
            ],
            tool_resources: {
                file_search: {
                    vector_store_ids: [this.knowledge_base]
                }
            }
        });
    }

    /**
     * Sends one message on a fresh thread and returns the assistant's reply text.
     * @param {string} assistant_id - Id of the assistant to run.
     * @param {string} message - User message content.
     * @returns {Promise<string>} Text of the latest assistant message.
     * @throws {Error} If the run ends in a terminal state other than "completed".
     */
    async chatWithGPT(assistant_id, message) {
        const thread = await this.openai.beta.threads.create();

        await this.openai.beta.threads.messages.create(thread.id, {
            role: "user",
            content: message
        });

        const run = await this.openai.beta.threads.runs.create(thread.id, {
            assistant_id: assistant_id
        });

        // Poll until the run reaches a terminal state. The original loop only
        // checked for "completed" and would spin forever on a failed,
        // cancelled, or expired run.
        const TERMINAL_STATES = new Set(["completed", "failed", "cancelled", "expired"]);
        let run_status = await this.openai.beta.threads.runs.retrieve(thread.id, run.id);
        while (!TERMINAL_STATES.has(run_status.status)) {
            await new Promise(resolve => setTimeout(resolve, 1000));
            run_status = await this.openai.beta.threads.runs.retrieve(thread.id, run.id);
        }
        if (run_status.status !== "completed") {
            throw new Error(`Assistant run ended with status "${run_status.status}"`);
        }

        const messages = await this.openai.beta.threads.messages.list(thread.id);
        return messages.data[0].content[0].text.value;
    }
}

Integración con Herramientas Externas

API de Terceros

/**
 * Bridges GPT function calling to an arbitrary external HTTP API.
 */
class ExternalToolIntegration {
    /** @param {object} openai - Initialized OpenAI SDK client. */
    constructor(openai) {
        this.openai = openai;
    }

    /**
     * Lets the model decide whether the user query requires an external API
     * call and performs it when requested.
     *
     * @param {string} apiEndpoint - Base URL of the external API.
     * @param {string} apiKey - Bearer token for the external API.
     * @param {string} userQuery - Natural-language user request.
     * @returns {Promise<object|undefined>} API response JSON when the model
     *   requested a call; otherwise undefined (original behavior preserved).
     */
    async integrateWithAPI(apiEndpoint, apiKey, userQuery) {
        const functions = [
            {
                name: "call_external_api",
                description: "Llamar a API externa",
                parameters: {
                    type: "object",
                    properties: {
                        endpoint: { type: "string" },
                        method: { type: "string" },
                        data: { type: "object" }
                    }
                }
            }
        ];

        const completion = await this.openai.chat.completions.create({
            model: "gpt-4",
            messages: [
                {
                    role: "user",
                    content: userQuery
                }
            ],
            functions: functions,
            function_call: "auto"
        });

        if (completion.choices[0].message.function_call) {
            const function_args = JSON.parse(completion.choices[0].message.function_call.arguments);
            return await this.executeAPICall(function_args, apiEndpoint, apiKey);
        }
    }

    /**
     * Executes the HTTP request described by the model's function arguments.
     *
     * Bug fix: the original always attached a JSON body — fetch rejects a
     * request body on GET/HEAD. The body is now only sent for methods that
     * allow one, and non-OK responses raise instead of being parsed silently.
     *
     * @param {{endpoint: string, method?: string, data?: object}} args
     * @param {string} endpoint - Base URL the path is appended to.
     * @param {string} apiKey - Bearer token.
     * @returns {Promise<object>} Parsed JSON response.
     * @throws {Error} On a non-2xx HTTP status.
     */
    async executeAPICall(args, endpoint, apiKey) {
        const method = (args.method ?? "GET").toUpperCase();
        const options = {
            method,
            headers: {
                'Authorization': `Bearer ${apiKey}`,
                'Content-Type': 'application/json'
            }
        };
        // GET/HEAD requests must not carry a body (fetch throws otherwise).
        if (args.data !== undefined && method !== "GET" && method !== "HEAD") {
            options.body = JSON.stringify(args.data);
        }

        const response = await fetch(`${endpoint}${args.endpoint}`, options);
        if (!response.ok) {
            throw new Error(`External API call failed with status ${response.status}`);
        }
        return await response.json();
    }
}

Streaming y Tiempo Real

Respuestas en Tiempo Real

/**
 * Streams a chat completion, echoing each token to stdout as it arrives.
 *
 * @param {Array<object>} messages - Conversation history to send.
 * @returns {Promise<void>} Resolves once the stream is exhausted.
 */
async function streamResponse(messages) {
    const stream = await openai.chat.completions.create({
        model: "gpt-4",
        messages,
        stream: true
    });

    // Each streamed part carries an incremental delta; only some include text.
    for await (const part of stream) {
        const text = part.choices[0]?.delta?.content;
        if (text) {
            process.stdout.write(text);
        }
    }
}

Optimización de Costos

Estrategias de Ahorro

  • Modelos apropiados: Usar gpt-3.5-turbo para tareas simples
  • Cache de respuestas: Almacenar respuestas frecuentes
  • Prompt engineering: Optimizar prompts para reducir tokens
  • Batching: Agrupar múltiples requests
/**
 * Cost-conscious chat helper: routes prompts to the cheaper gpt-3.5-turbo
 * model and memoizes answers per prompt so repeated questions cost nothing.
 */
class CostOptimizer {
    constructor() {
        // prompt -> previously returned completion text
        this.cache = new Map();
    }

    /**
     * Answers a prompt, serving from the in-memory cache when possible.
     *
     * @param {string} prompt - User prompt.
     * @param {boolean} [useCache=true] - Read from and write to the cache.
     * @returns {Promise<string>} Model response text.
     */
    async getOptimizedResponse(prompt, useCache = true) {
        if (useCache && this.cache.has(prompt)) {
            return this.cache.get(prompt);
        }

        const conversation = [
            {
                role: "system",
                content: "Responde de manera concisa y directa."
            },
            {
                role: "user",
                content: prompt
            }
        ];

        const completion = await openai.chat.completions.create({
            model: "gpt-3.5-turbo", // Modelo más económico
            messages: conversation,
            max_tokens: 150 // Limitar tokens
        });

        const answer = completion.choices[0].message.content;
        if (useCache) {
            this.cache.set(prompt, answer);
        }
        return answer;
    }
}

Monitoreo y Analytics

Métricas Importantes

  • Uso de tokens: Tracking detallado del consumo
  • Latencia: Tiempo de respuesta de las APIs
  • Rate limits: Monitoreo de límites de API
  • Costos: Análisis de costos por funcionalidad
/**
 * Accumulates usage metrics (tokens, cost, request count, latency) across
 * OpenAI API calls.
 */
class APIMonitor {
    constructor() {
        this.metrics = {
            totalTokens: 0,
            totalCost: 0,
            requestCount: 0,
            averageLatency: 0
        };
    }

    /**
     * Records one completed request and updates aggregate metrics.
     *
     * @param {object} completion - SDK response (reads usage.total_tokens).
     * @param {number} startTime - Date.now() captured before the request.
     * @param {string} [model="gpt-4"] - Model used, for pricing.
     * @returns {Promise<{tokens: number, cost: number, latency: number}>}
     */
    async trackRequest(completion, startTime, model = "gpt-4") {
        const latency = Date.now() - startTime;
        const tokens = completion.usage.total_tokens;
        const cost = this.calculateCost(tokens, model);

        this.metrics.totalTokens += tokens;
        this.metrics.totalCost += cost;
        this.metrics.requestCount++;
        // Incremental true running mean. The original `(avg + latency) / 2`
        // halved the very first latency and over-weighted recent requests.
        this.metrics.averageLatency +=
            (latency - this.metrics.averageLatency) / this.metrics.requestCount;

        return {
            tokens,
            cost,
            latency
        };
    }

    /**
     * Approximate cost in USD for a token count.
     *
     * @param {number} tokens - Total tokens consumed.
     * @param {string} [model="gpt-4"] - Model key; defaults to gpt-4 to keep
     *   the original single-argument behavior. (The original declared a
     *   gpt-3.5 price but never used it.)
     * @returns {number} Estimated cost in USD.
     */
    calculateCost(tokens, model = "gpt-4") {
        // Precios aproximados por 1K tokens
        const pricePer1K = {
            "gpt-4": 0.03,
            "gpt-3.5-turbo": 0.002
        };
        const rate = pricePer1K[model] ?? pricePer1K["gpt-4"];
        return tokens * rate / 1000;
    }
}

Subscribe to AI.TDD Articles

Don’t miss out on the latest issues. Sign up now to get access to the library of members-only issues.
jamie@example.com
Subscribe