// Prompt helper: extracts the parts of a text that are relevant to a query.
/**
 * Extracts the sentences of `text` relevant to `query` via the OpenAI chat API.
 *
 * @param {string} text - The source text to search through.
 * @param {string} query - The question used to select relevant sentences.
 * @returns {Promise<string>} The model's reply (at most 10 sentences, or
 *   'No information provided'), or an error-description string on failure.
 *   Note: this function never throws — callers always receive a string.
 */
const getRelevantParts = async (text, query) => {
  try {
    // We use your OpenAI key to initialize the OpenAI client
    const openAIKey = process.env["OPENAI_API_KEY"];
    const openai = new OpenAI({
      apiKey: openAIKey,
    });
    const response = await openai.chat.completions.create({
      // Using gpt-3.5-turbo due to speed to prevent timeouts. You can tweak this prompt as needed
      model: "gpt-3.5-turbo-0125",
      messages: [
        { role: "system", content: "You are a helpful assistant that finds relevant content in text based on a query. You only return the relevant sentences, and you return a maximum of 10 sentences" },
        { role: "user", content: `Based on this question: **"${query}"**, get the relevant parts from the following text:*****\n\n${text}*****. If you cannot answer the question based on the text, respond with 'No information provided'` },
      ],
      // using temperature of 0 since we want to just extract the relevant content
      temperature: 0,
      // using max_tokens of 1000, but you can customize this based on the number of documents you are searching.
      max_tokens: 1000,
    });
    // Guard the response shape: if the API returns no choices (or a choice
    // without content) fall back to the same sentinel the prompt instructs
    // the model to use, instead of crashing or returning undefined.
    return response.choices?.[0]?.message?.content ?? "No information provided";
  } catch (error) {
    console.error("Error with OpenAI:", error);
    // Template literal with an explicit separator — the original `'…' + error`
    // glued the error text directly onto the message with no space or colon.
    return `Error processing text with OpenAI: ${error}`;
  }
};