fix(basic.gblib): FUNCTION GPT.
parent ac0b3f150e
commit 158c1603c9

4 changed files with 147 additions and 118 deletions
package.json | 13
@@ -76,13 +76,14 @@
     "@azure/keyvault-keys": "4.6.0",
     "@azure/ms-rest-js": "2.6.2",
     "@azure/msal-node": "1.14.3",
-    "@azure/search-documents": "11.3.1",
+    "@azure/search-documents": "12.0.0",
     "@azure/storage-blob": "12.17.0",
     "@google-cloud/pubsub": "3.2.1",
     "@google-cloud/translate": "7.0.4",
     "@hubspot/api-client": "7.1.2",
     "@koa/cors": "4.0.0",
-    "@langchain/openai": "0.0.15",
+    "@langchain/community": "^0.0.36",
+    "@langchain/openai": "^0.0.15",
     "@microsoft/microsoft-graph-client": "3.0.4",
     "@nlpjs/basic": "4.26.1",
     "@nosferatu500/textract": "3.1.2",
@@ -134,21 +135,23 @@
     "ffmpeg-static": "5.1.0",
     "google-libphonenumber": "3.2.31",
     "googleapis": "126.0.1",
     "hnswlib-node": "^1.4.2",
     "ibm-watson": "7.1.2",
     "iso-639-1": "3.1.1",
     "join-images-updated": "1.1.4",
     "js-md5": "0.8.3",
+    "json-schema-to-zod": "^2.0.14",
     "just-indent": "0.0.1",
     "keyv": "4.5.2",
     "koa": "2.13.4",
     "koa-body": "6.0.1",
     "koa-router": "12.0.0",
-    "langchain": "^0.0.163",
+    "langchain": "0.1.25",
+    "language-tags": "^1.0.9",
     "line-replace": "2.0.1",
     "lodash": "4.17.21",
     "luxon": "3.1.0",
-    "mammoth": "1.5.1",
+    "mammoth": "1.7.0",
     "mariadb": "3.2.2",
     "mime-types": "2.1.35",
     "moment": "1.3.0",
@@ -212,7 +215,7 @@
     "whatsapp-web.js": "https://github.com/Julzk/whatsapp-web.js/tarball/jkr_hotfix_7",
     "winston": "3.8.2",
     "winston-logs-display": "1.0.0",
-    "ws": "8.12.1",
+    "ws": "8.14.2",
     "yarn": "1.22.19",
     "zod-to-json-schema": "^3.22.4"
   },
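Note: langchain 0.1.x splits the former monolith into scoped packages, which is why @langchain/community and @langchain/openai enter here while the core langchain pin moves off the 0.0.x line. A minimal sketch of the matching import moves, using only paths that appear in the source diff below:

    // old, langchain 0.0.x
    import { HNSWLib } from 'langchain/vectorstores/hnswlib';
    // new, langchain 0.1.x with scoped packages
    import { HNSWLib } from '@langchain/community/vectorstores/hnswlib';
    import { ChatOpenAI } from '@langchain/openai';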
@@ -30,21 +30,29 @@
 'use strict';
 
+import { RunnableSequence } from "@langchain/core/runnables";
+import { ChatOpenAI } from "@langchain/openai";
 import { GBMinInstance } from 'botlib';
 import { CallbackManager } from 'langchain/callbacks';
-import { ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts';
 import { ConversationChain, LLMChain } from 'langchain/chains';
+import * as Fs from 'fs';
+import { formatXml } from "langchain/agents/format_scratchpad/xml";
+import { jsonSchemaToZod } from "json-schema-to-zod";
+import { renderTextDescription } from "langchain/tools/render";
 
+import { AgentExecutor, AgentStep } from "langchain/agents";
 import { BufferWindowMemory } from 'langchain/memory';
+import { AIMessagePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts';
 import { Tool } from "langchain/tools";
+import { HNSWLib } from '@langchain/community/vectorstores/hnswlib';
+import Path from 'path';
 import { CollectionUtil } from 'pragmatismo-io-framework';
 import { DialogKeywords } from '../../basic.gblib/services/DialogKeywords.js';
-import Path from 'path';
-import * as Fs from 'fs';
-import { HNSWLib } from 'langchain/vectorstores/hnswlib';
+import { GuaribasSubject } from '../../kb.gbapp/models/index.js';
+import { GBConfigService } from '../../core.gbapp/services/GBConfigService.js';
-import { ChatOpenAI } from "@langchain/openai";
 import { JsonOutputFunctionsParser } from 'langchain/dist/output_parsers/openai_functions.js';
 import { GBVMService } from '../../basic.gblib/services/GBVMService.js';
-import { GBConfigService } from '../../core.gbapp/services/GBConfigService.js';
-import { GuaribasSubject } from '../../kb.gbapp/models/index.js';
+import { XMLAgentOutputParser } from "langchain/agents/xml/output_parser";
+import { StringOutputParser } from "@langchain/core/output_parsers";
+import { convertToOpenAITool } from "@langchain/core/utils/function_calling";
 
 export class ChatServices {
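Note: json-schema-to-zod (added above) emits Zod *source text* rather than a live schema object; a minimal sketch, with an illustrative schema that is not from the repo:

    import { jsonSchemaToZod } from "json-schema-to-zod";

    // Produces a string such as "z.object({ text: z.string() })"; it still has to be
    // evaluated or written to a module before it can validate anything.
    const zodSource = jsonSchemaToZod(
      { type: "object", properties: { text: { type: "string" } } },
      { module: "esm" }
    );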
@@ -81,129 +89,147 @@ export class ChatServices {
     subjects: GuaribasSubject[]
   ) {
-    return { answer: undefined, questionId: 0 };
     if (!process.env.OPENAI_API_KEY) {
       return { answer: undefined, questionId: 0 };
     }
 
     const systemPrompt = SystemMessagePromptTemplate.fromTemplate(
       `You are ${min.botId}`);
 
     const contentLocale = min.core.getParam(
       min.instance,
       'Default Content Language',
       GBConfigService.get('DEFAULT_CONTENT_LANGUAGE')
     );
 
+    let tools = await ChatServices.getTools(min);
+    let toolsAsText = ChatServices.getToolsAsText(tools);
 
+    const memory = new BufferWindowMemory({
+      returnMessages: true,
+      memoryKey: 'chat_history',
+      inputKey: 'input',
+      k: 2,
+    });
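Note: with k: 2 the memory keeps only the last two exchanges; a standalone sketch of how the chains below read it, not repo code:

    import { BufferWindowMemory } from 'langchain/memory';

    const mem = new BufferWindowMemory({
      returnMessages: true, memoryKey: 'chat_history', inputKey: 'input', k: 2
    });
    await mem.saveContext({ input: 'hi' }, { output: 'hello' });
    // chat_history is an array of message objects when returnMessages is true.
    const { chat_history } = await mem.loadMemoryVariables({});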
+    const model = new ChatOpenAI({
+      openAIApiKey: process.env.OPENAI_API_KEY,
+      modelName: "gpt-3.5-turbo-0125",
+      temperature: 0,
+    });
 
+    const contextVectorStore = min['vectorStore'];
 
+    let promptTemplate = `Answer in ${contentLocale}.
+    You have access to the context (RELEVANTDOCS) provided by the user.
+    When answering, consider whether the question is covered by RELEVANTDOCS, but never
+    mention the source to the user.
+    Don't justify your answers. Don't refer to yourself in any of the created content.
+    Don't prefix RESPONSE: when answering the user.
+    RELEVANTDOCS: {context}
+
+    QUESTION: """{input}"""
+
+    You have the following tools that you can invoke based on the user inquiry.
+    Tools:
+
+    ${toolsAsText}
+
+    `;
 
+    // Leftover placeholder; toolMap is not referenced by the chains below.
+    const toolMap: Record<string, any> = {
+      multiply: () => {},
+    };
 
+    const modelWithTools = model.bind({
+      tools: tools.map(convertToOpenAITool),
+    });
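Note: bind({ tools }) attaches OpenAI-style tool schemas to every model call, and convertToOpenAITool produces that JSON shape from a LangChain tool. A sketch with a toy DynamicTool, not from the repo:

    import { DynamicTool } from 'langchain/tools';
    import { convertToOpenAITool } from '@langchain/core/utils/function_calling';

    const echo = new DynamicTool({
      name: 'echo',
      description: 'Echoes its input back.',
      func: async (input: string) => input,
    });
    // Yields { type: 'function', function: { name, description, parameters } }.
    const toolJson = convertToOpenAITool(echo);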
 
+    const questionGeneratorTemplate = ChatPromptTemplate.fromMessages([
+      AIMessagePromptTemplate.fromTemplate(
+        "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question."
+      ),
+      new MessagesPlaceholder("chat_history"),
+      AIMessagePromptTemplate.fromTemplate(`Follow Up Input: {question} Standalone question:`),
+    ]);
 
+    const combineDocumentsPrompt = ChatPromptTemplate.fromMessages([
+      AIMessagePromptTemplate.fromTemplate(
+        "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n{context}\n\n"
+      ),
+      new MessagesPlaceholder("chat_history"),
+      HumanMessagePromptTemplate.fromTemplate("Question: {question}"),
+    ]);
 
+    const combineDocumentsChain = RunnableSequence.from([
+      {
+        question: (output: string) => output,
+        chat_history: async () => {
+          const { chat_history } = await memory.loadMemoryVariables({});
+          return chat_history;
+        },
+        context: async (output: string) => {
+          return await this.getRelevantContext(contextVectorStore, output, 1);
+        },
+      },
+      combineDocumentsPrompt,
+      modelWithTools,
+      new StringOutputParser(),
+    ]);
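Note: RunnableSequence.from runs steps left to right; the leading object literal fans the input out into the named prompt variables. A condensed standalone sketch of the same pattern:

    import { RunnableSequence } from '@langchain/core/runnables';
    import { StringOutputParser } from '@langchain/core/output_parsers';
    import { ChatPromptTemplate } from '@langchain/core/prompts';
    import { ChatOpenAI } from '@langchain/openai';

    const chain = RunnableSequence.from([
      { question: (i: { question: string }) => i.question },
      ChatPromptTemplate.fromTemplate('Answer briefly: {question}'),
      new ChatOpenAI({ modelName: 'gpt-3.5-turbo-0125', temperature: 0 }),
      new StringOutputParser(),
    ]);
    const out = await chain.invoke({ question: 'What is HNSW?' });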
 
+    const conversationalQaChain = RunnableSequence.from([
+      {
+        question: (i: { question: string }) => i.question,
+        chat_history: async () => {
+          const { chat_history } = await memory.loadMemoryVariables({});
+          return chat_history;
+        },
+      },
+      questionGeneratorTemplate,
+      modelWithTools,
+      new StringOutputParser(),
+      combineDocumentsChain,
+    ]);
 
+    // Ask the user's actual question (the draft hard-coded a sample question
+    // from the LangChain docs here).
+    const question = query.trim().replaceAll('\n', ' ');
+    let result = await conversationalQaChain.invoke({
+      question,
+    });
 
+    return { answer: result.toString(), questionId: 0 };
   }
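Note: getRelevantContext is not part of this hunk; a plausible shape, assuming it wraps a vector-store similarity search (hypothetical implementation, the k default and joining are guesses):

    import { HNSWLib } from '@langchain/community/vectorstores/hnswlib';

    async function getRelevantContext(store: HNSWLib, query: string, k = 1): Promise<string> {
      const docs = await store.similaritySearch(query, k); // nearest stored chunks
      return docs.map(d => d.pageContent).join('\n');
    }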
 
+  private static getToolsAsText(tools) {
+    return Object.keys(tools)
+      .map((toolname) => `${tools[toolname].function.name}: ${tools[toolname].function.description}`)
+      .join("\n");
+  }
 
+  private static async getTools(min: GBMinInstance) {
+    let functions = [];
 
+    // Adds each .gbdialog script as a GPT function, when a compiled
+    // JSON Schema exists for it.
-    await CollectionUtil.asyncForEach(Object.keys(min.scriptMap), async script => {
+    await CollectionUtil.asyncForEach(Object.keys(min.scriptMap), async (script) => {
+      const path = DialogKeywords.getGBAIPath(min.botId, "gbdialog", null);
+      const functionJSON = Path.join('work', path, `${script}.json`);
 
+      if (Fs.existsSync(functionJSON)) {
+        const func = JSON.parse(Fs.readFileSync(functionJSON, 'utf8'));
 
+        func.schema = jsonSchemaToZod(func.properties, { module: "esm" });
+        func.func = async () => {
+          const name = '';
+          const pid = 1;
+          const text = ''; // TODO:
+          const result = await GBVMService.callVM(name, min, false, pid, false, [text]);
+          return result; // surface the dialog's result to the model
+        };
 
+        functions.push(func);
+      }
+    });
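Note: the loader above only reads func.properties plus function.name and function.description; a hypothetical ${script}.json reconstructed from those accesses (names illustrative, not from the repo):

    // Hypothetical work/<path>/<script>.json content, shown as a TS literal:
    const exampleFuncJson = {
      function: { name: 'get_order_status', description: 'Looks up an order by id.' },
      properties: { type: 'object', properties: { id: { type: 'string' } } },
    };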
 
-    let SystemPromptTailored = ''; // TODO: Load from user context.
 
-    // Generates function definition for each function
-    // in plain text to be used in system prompt.
-    let functionDef = Object.keys(functions)
-      .map((toolname) => `${functions[toolname].function.name}: ${functions[toolname].function.description}`)
-      .join("\n");
 
-    let promptTemplate = `Answer in ${contentLocale}.
-    You have access to the context (RELEVANTDOCS) provided by the user.
-    When answering think about whether the question in RELEVANTDOCS, but never mention
-    to user about the source.
-    Don’t justify your answers. Don't refer to yourself in any of the created content.
-    Don´t prefix RESPONSE: when answering the user.
-    RELEVANTDOCS: {context}
-
-    QUESTION: """{input}"""
-
-    ${SystemPromptTailored}
-
-    You have the following tools that you can invoke based on the user inquiry.
-    Tools:
-
-    ${functionDef}
-
-    `;
 
-    const chatPrompt = ChatPromptTemplate.fromPromptMessages([
-      systemPrompt,
-      HumanMessagePromptTemplate.fromTemplate(promptTemplate),
-    ]);
 
-    const windowMemory = new BufferWindowMemory({
-      returnMessages: false,
-      memoryKey: 'immediate_history',
-      inputKey: 'input',
-      k: 2,
-    });
 
-    const llm = new ChatOpenAI({
-      openAIApiKey: process.env.OPENAI_API_KEY,
-      modelName: "gpt-3.5-turbo-0125",
-      temperature: 0,
-    });
 
-    const llmWithTools = llm.bind({
-      tools: functions
-    });
 
-    const chain = new LLMChain({
-      memory: windowMemory,
-      prompt: chatPrompt,
-      llm: llm as any,
-    });
 
-    const contextVectorStore = min['vectorStore'];
-    const question = query.trim().replaceAll('\n', ' ');
-    const context = await this.getRelevantContext(contextVectorStore, question, 1);
 
-    let prompt;
 
-    // allow the LLM to iterate until it finds a final answer
-    while (true) {
-      const response = await chain.call({
-        input: question,
-        context,
-        history: '',
-        immediate_history: '',
-      });
 
-      // add this to the prompt
-      prompt += response;
 
-      const action = response.match(/Action: (.*)/)?.[1];
-      if (action) {
-        // execute the action specified by the LLMs
-        const actionInput = response.match(/Action Input: "?(.*)"?/)?.[1];
-        const text = '';
 
-        const result = await GBVMService.callVM(actionInput, min, false, pid,false, [text]);
 
-        prompt += `Observation: ${result}\n`;
-      } else {
-        return response.match(/Final Answer: (.*)/)?.[1];
-      }
-    }
-    return { answer: undefined, questionId: 0 };
+    return functions;
   }
 }
@@ -183,7 +183,7 @@ export class AskDialog extends IGBDialog {
       }
     },
     async step => {
-      let answer: GuaribasAnswer = null;
+      let answer;
       const member = step.context.activity.from;
       const sec = new SecService();
       let user = await sec.ensureUser(min, member.id, member.name, '', 'web', member.name, null);
@@ -86,7 +86,7 @@ import { ChatServices } from '../../gpt.gblib/services/ChatServices.js';
  * Result for query on KB data.
  */
 export class KBServiceSearchResults {
-  public answer: GuaribasAnswer;
+  public answer: string | GuaribasAnswer;
   public questionId: number;
 }
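Note: with answer widened to string | GuaribasAnswer, callers must narrow before touching GuaribasAnswer fields; a minimal sketch (the content field is an assumption based on kb.gbapp usage):

    function answerText(r: KBServiceSearchResults): string {
      // string answers come from the LLM chain; GuaribasAnswer rows come from the KB.
      return typeof r.answer === 'string' ? r.answer : r.answer.content;
    }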