d29770511c

Summary: The feature is behind a flag, GRIST_FORMULA_ASSISTANT, which must be set to "true", but it can also be enabled in the developer console by invoking GRIST_FORMULA_ASSISTANT.set(true). Keys can be overridden in the document settings page.

Test Plan: For now, just a stub test that checks that this feature is disabled by default.

Reviewers: paulfitz

Reviewed By: paulfitz

Subscribers: dsagal

Differential Revision: https://phab.getgrist.com/D3815
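For example, a minimal configuration sketch (the keys and model variables below are the ones read by this module; GRIST_FORMULA_ASSISTANT is assumed to be supplied the same way, via the server environment):

    GRIST_FORMULA_ASSISTANT=true
    OPENAI_API_KEY=<your OpenAI key>       # or HUGGINGFACE_API_KEY=<your HuggingFace key>
    COMPLETION_MODEL=text-davinci-002      # optional; COMPLETION_URL may also be set for HuggingFace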
/**
 * Module with functions used for AI formula assistance.
 */

import {delay} from 'app/common/delay';
import log from 'app/server/lib/log';
import fetch from 'node-fetch';

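// Dependencies grouped into an object so that tests can replace them (e.g. stubbing out fetch).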
export const DEPS = { fetch };
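/**
 * Requests an AI-generated completion for the given prompt. Uses OpenAI when
 * OPENAI_API_KEY is set, and HuggingFace when HUGGINGFACE_API_KEY is set (the
 * HuggingFace result takes precedence if both are set). Makes up to three
 * attempts, waiting one second after a failure, and throws if no completion
 * could be obtained (e.g. when neither key is configured).
 */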
export async function sendForCompletion(prompt: string): Promise<string> {
  let completion: string|null = null;
  let retries: number = 0;
  const openApiKey = process.env.OPENAI_API_KEY;
  const model = process.env.COMPLETION_MODEL || "text-davinci-002";

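  // Retry a few times: transient failures (e.g. a model that is still loading) get another chance after a short delay.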
  while(retries++ < 3) {
    try {
      if (openApiKey) {
        completion = await sendForCompletionOpenAI(prompt, openApiKey, model);
      }
      if (process.env.HUGGINGFACE_API_KEY) {
        completion = await sendForCompletionHuggingFace(prompt);
      }
      break;
    } catch(e) {
      await delay(1000);
    }
  }
  if (completion === null) {
    throw new Error("Please set OPENAI_API_KEY or HUGGINGFACE_API_KEY (and optionally COMPLETION_MODEL)");
  }
  log.debug(`Received completion:`, {completion});
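  // Truncate at the first subsequent line indented by exactly four spaces, presumably the start of
  // the next definition in the prompt scaffold.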
  completion = completion.split(/\n {4}[^ ]/)[0];
  return completion;
}
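/**
 * Fetches a completion from the OpenAI completions endpoint
 * (https://api.openai.com/v1/completions) and returns the text of the first choice.
 */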
async function sendForCompletionOpenAI(prompt: string, apiKey: string, model = "text-davinci-002") {
  if (!apiKey) {
    throw new Error("OPENAI_API_KEY not set");
  }
  const response = await DEPS.fetch(
    "https://api.openai.com/v1/completions",
    {
      method: "POST",
      headers: {
        "Authorization": `Bearer ${apiKey}`,
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        prompt,
        max_tokens: 150,
        temperature: 0,
        // COMPLETION_MODEL of `code-davinci-002` may be better if you have access to it.
        model,
        stop: ["\n\n"],
      }),
    },
  );
  if (response.status !== 200) {
    log.error(`OpenAI API returned ${response.status}: ${await response.text()}`);
    throw new Error(`OpenAI API returned status ${response.status}`);
  }
  const result = await response.json();
  const completion = result.choices[0].text;
  return completion;
}
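/**
 * Fetches a completion from the HuggingFace Inference API, using COMPLETION_URL or
 * COMPLETION_MODEL when set (defaulting to NovelAI/genji-python-6B), and returns the
 * generated text up to the first blank line.
 */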
async function sendForCompletionHuggingFace(prompt: string) {
  const apiKey = process.env.HUGGINGFACE_API_KEY;
  if (!apiKey) {
    throw new Error("HUGGINGFACE_API_KEY not set");
  }
  // COMPLETION_MODEL values I've tried:
  // - codeparrot/codeparrot
  // - NinedayWang/PolyCoder-2.7B
  // - NovelAI/genji-python-6B
  let completionUrl = process.env.COMPLETION_URL;
  if (!completionUrl) {
    if (process.env.COMPLETION_MODEL) {
      completionUrl = `https://api-inference.huggingface.co/models/${process.env.COMPLETION_MODEL}`;
    } else {
      completionUrl = 'https://api-inference.huggingface.co/models/NovelAI/genji-python-6B';
    }
  }

  const response = await DEPS.fetch(
    completionUrl,
    {
      method: "POST",
      headers: {
        "Authorization": `Bearer ${apiKey}`,
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        inputs: prompt,
        parameters: {
          return_full_text: false,
          max_new_tokens: 50,
        },
      }),
    },
  );
  if (response.status !== 200) {
    // Read the response body once (it cannot be consumed twice) before logging.
    const text = await response.text();
    if (response.status === 503) {
      // The model may still be loading; wait before surfacing the error so the caller's retry has a chance.
      log.error(`Sleeping for 10s - HuggingFace API returned ${response.status}: ${text}`);
      await delay(10000);
    } else {
      log.error(`HuggingFace API returned ${response.status}: ${text}`);
    }
    throw new Error(`HuggingFace API returned status ${response.status}: ${text}`);
  }
  const result = await response.json();
  const completion = result[0].generated_text;
  return completion.split('\n\n')[0];
}