new(all): WebDav support for all bots.

Rodrigo Rodriguez 2024-09-01 10:08:56 -03:00
parent 222a4f3201
commit b7bcd4f4c8
8 changed files with 92 additions and 22 deletions

View file

@@ -1493,7 +1493,7 @@ export class GBMinService {
const isVMCall = Object.keys(min.scriptMap).find(key => min.scriptMap[key] === context.activity.text) !== undefined;
// TODO: Externalize intents for GPT.
// TODO: Externalize intents for LLM.
if (/create dialog|creative dialog|create a dialog|criar diálogo/gi.test(context.activity.text)) {
await step.beginDialog('/dialog');

View file

@@ -70,6 +70,7 @@ import {
SQL_MSSQL_PROMPT,
SQL_MYSQL_PROMPT
} from 'langchain/chains/sql_db';
import { GBUtil } from '../../../src/util.js';
export interface CustomOutputParserFields {}
export type ExpectedOutput = any;
@@ -82,19 +83,19 @@ class CustomHandler extends BaseCallbackHandler {
name = 'custom_handler';
handleLLMNewToken(token: string) {
GBLogEx.info(0, `LLM: token: ${JSON.stringify(token)}`);
GBLogEx.info(0, `LLM: token: ${GBUtil.toYAML(token)}`);
}
handleLLMStart(llm: Serialized, _prompts: string[]) {
GBLogEx.info(0, `LLM: handleLLMStart ${JSON.stringify(llm)}, Prompts: ${_prompts.join('\n')}`);
GBLogEx.info(0, `LLM: handleLLMStart ${GBUtil.toYAML(llm)}, Prompts: ${_prompts.join('\n')}`);
}
handleChainStart(chain: Serialized) {
GBLogEx.info(0, `LLM: handleChainStart: ${JSON.stringify(chain)}`);
GBLogEx.info(0, `LLM: handleChainStart: ${GBUtil.toYAML(chain)}`);
}
handleToolStart(tool: Serialized) {
GBLogEx.info(0, `LLM: handleToolStart: ${JSON.stringify(tool)}`);
GBLogEx.info(0, `LLM: handleToolStart: ${GBUtil.toYAML(tool)}`);
}
}
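These handlers switch from JSON.stringify to GBUtil.toYAML so that serialized LLM payloads are easier to read in the logs. For reference, a minimal sketch of such a helper, assuming it simply wraps js-yaml's dump; the real util.js implementation may differ:

import * as yaml from 'js-yaml';

export class GBUtil {
  // Render any value as YAML for readable log output (sketch, not the shipped helper).
  public static toYAML(data: any): string {
    try {
      return yaml.dump(data, { indent: 2, skipInvalid: true, lineWidth: 120 });
    } catch {
      // If YAML serialization fails, fall back to compact JSON.
      return JSON.stringify(data);
    }
  }
}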
@@ -132,6 +133,7 @@ export class GBLLMOutputParser extends BaseLLMOutputParser<ExpectedOutput> {
try {
GBLogEx.info(this.min, result);
result = result.replace(/\\n/g, '');
result = result.replace(/\`\`\`/g, '');
res = JSON.parse(result);
} catch {
return result;
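The added replace call strips Markdown code fences (alongside the existing escaped-newline cleanup) before JSON.parse, since LLMs often wrap JSON answers in fenced blocks. The same pattern as a standalone, hedged sketch; the helper name is illustrative, not the shipped parser:

// Illustrative helper: remove escaped newlines and code fences, then try JSON.parse.
function tryParseLLMJson(raw: string): any {
  const cleaned = raw
    .replace(/\\n/g, '')            // drop literal "\n" escape sequences
    .replace(/`{3}(?:json)?/g, '')  // drop triple-backtick code fences
    .trim();
  try {
    return JSON.parse(cleaned);
  } catch {
    // Not valid JSON; return the original text unchanged, as the parser above does.
    return raw;
  }
}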
@@ -295,9 +297,9 @@ export class ChatServices {
let model;
const azureOpenAIKey = await (min.core as any)['getParam'](min.instance, 'Azure Open AI Key', null, true);
const azureOpenAIGPTModel = await (min.core as any)['getParam'](
const azureOpenAILLMModel = await (min.core as any)['getParam'](
min.instance,
'Azure Open AI GPT Model',
'Azure Open AI LLM Model',
null,
true
);
@@ -312,7 +314,7 @@ export class ChatServices {
model = new ChatOpenAI({
azureOpenAIApiKey: azureOpenAIKey,
azureOpenAIApiInstanceName: azureOpenAIApiInstanceName,
azureOpenAIApiDeploymentName: azureOpenAIGPTModel,
azureOpenAIApiDeploymentName: azureOpenAILLMModel,
azureOpenAIApiVersion: azureOpenAIVersion,
temperature: 0,
callbacks: [logHandler]
@@ -412,8 +414,8 @@ export class ChatServices {
tool_output: async (output: object) => {
const name = output['func'][0].function.name;
const args = JSON.parse(output['func'][0].function.arguments);
GBLogEx.info(min, `Running .gbdialog '${name}' as GPT tool...`);
const pid = GBVMService.createProcessInfo(null, min, 'gpt', null);
GBLogEx.info(min, `Running .gbdialog '${name}' as LLM tool...`);
const pid = GBVMService.createProcessInfo(null, min, 'LLM', null);
return await GBVMService.callVM(name, min, false, pid, false, args);
},
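For context, the `output['func']` entries consumed above follow the OpenAI-style tool-call shape, roughly as below; the script name and argument values are made up for illustration:

// Illustrative payload for tool_output: name resolves to a .gbdialog script,
// and arguments arrive as a JSON string that is parsed before GBVMService.callVM.
const exampleToolCall = {
  func: [
    {
      function: {
        name: 'login-waiter',                        // hypothetical .gbdialog script name
        arguments: '{"stall": 10, "operator": 123}'  // parsed with JSON.parse above
      }
    }
  ]
};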
@@ -458,8 +460,9 @@ export class ChatServices {
new StringOutputParser()
]);
GBLogEx.info(min, `Calling LLM...`);
let result, sources;
let text, file, page;
let page;
// Choose the operation mode of answer generation, based on
// .gbot switch LLMMode and choose the corresponding chain.
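The comment above marks where the service branches on the .gbot LLMMode switch. A simplified sketch of that dispatch using only the modes visible in this commit ('document-ref' and 'sql'); the chain and question variables are placeholders for objects built in the surrounding code:

// Simplified view of the LLMMode dispatch (placeholder chain variables).
if (LLMMode === 'document-ref') {
  // Retrieval-augmented answer that also reports the source document and page.
  result = await documentChain.invoke({ question, chat_history: [] });
} else if (LLMMode === 'sql') {
  // The question is translated into SQL and run against the bot's connection.
  result = await sqlChain.invoke({ question });
} else {
  GBLogEx.info(min, `Invalid Answer Mode in .gbot: ${LLMMode}.`);
}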
@@ -477,7 +480,6 @@
});
} else if (LLMMode === 'sql') {
const con = min[`llm`]['gbconnection'];
const dialect = con['storageDriver'];
let dataSource;
@@ -506,6 +508,7 @@
logging: true
});
}
const db = await SqlDatabase.fromDataSourceParams({
appDataSource: dataSource
});
@@ -519,11 +522,11 @@
VERY IMPORTANT: Return just the generated SQL command as plain text with no Markdown or formatting.
------------
SCHEMA: {schema}
------------
QUESTION: {question}
------------
SQL QUERY:`);
/**
* Create a new RunnableSequence where we pipe the output from `db.getTableInfo()`
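The truncated comment above describes piping db.getTableInfo() into the prompt. A hedged sketch of such a RunnableSequence, following the LangChain JS SQL pattern; import paths, the stop token, and the `prompt`/`model` names depend on the surrounding code and langchain version:

import { RunnableSequence } from '@langchain/core/runnables';
import { StringOutputParser } from '@langchain/core/output_parsers';

// Sketch: feed the table schema and question into the SQL prompt, let the model
// write the query, and return it as plain text.
const sqlQueryChain = RunnableSequence.from([
  {
    schema: async () => db.getTableInfo(),                    // schema from the DataSource above
    question: (input: { question: string }) => input.question
  },
  prompt,                                                      // the SQL prompt template above
  model.bind({ stop: ['\nSQLResult:'] }),                      // stop before the model invents results
  new StringOutputParser()
]);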
@@ -600,7 +603,7 @@ export class ChatServices {
result = result.content;
} else {
GBLogEx.info(min, `Invalid Answer Mode in Config.xlsx: ${LLMMode}.`);
GBLogEx.info(min, `Invalid Answer Mode in .gbot: ${LLMMode}.`);
}
await memory.saveContext(
@@ -612,7 +615,6 @@
}
);
GBLogEx.info(min, `GPT Result: ${result.toString()}`);
return { answer: result.toString(), sources, questionId: 0, page };
}
@@ -625,7 +627,7 @@
private static async getTools(min: GBMinInstance) {
let functions = [];
// Adds .gbdialog as functions if any to GPT Functions.
// Adds .gbdialog as functions if any to LLM Functions.
await CollectionUtil.asyncForEach(Object.keys(min.scriptMap), async script => {
const path = DialogKeywords.getGBAIPath(min.botId, 'gbdialog', null);
const jsonFile = Path.join('work', path, `${script}.json`);
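getTools converts each .gbdialog script into an LLM function definition by loading a generated JSON file from the bot's work folder. A hedged sketch of how that loop might continue, assuming Fs is Node's fs module as imported elsewhere in the service; the existence check and schema layout are assumptions, not taken from the repo:

// Illustrative continuation: expose the script's generated schema as an LLM tool.
if (Fs.existsSync(jsonFile)) {
  const funcDef = JSON.parse(Fs.readFileSync(jsonFile, 'utf8'));
  // Expected shape (assumed): { name, description, parameters: { type: 'object', ... } }
  functions.push(funcDef);
}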

View file

@@ -1,2 +1,2 @@
name,value
Answer Mode,document
Answer Mode,document-ref

View file

@@ -1,7 +1,7 @@
PARAM stall AS number LIKE Stall Code
PARAM operator AS number LIKE Operator Code
DESCRIPTION This function (tool) is never called by GPT. It is a WebService of GB.
DESCRIPTION This function (tool) is never called by LLM. It is a WebService of GB.
REM Login as Waiter
data = NEW OBJECT

View file

@@ -0,0 +1,55 @@
BEGIN SYSTEM PROMPT
# I'm General Bots:
- I identify as General Bots, an AI assistant created through open collaboration.
- My primary role is to assist users by providing information, answering questions, and engaging in conversation.
- I use various open-source technologies and publicly available information to provide relevant and useful responses.
- I can understand and communicate fluently in the user's language of choice such as English, zhongwen, nihongo, Español, Français, Deutsch, and others.
- I respect user privacy and freedom of information.
- I encourage critical thinking and diverse perspectives.
## On my capabilities:
- I am capable of generating creative content such as poems, stories, code, essays, and songs using my own words and knowledge.
- If assistance is requested, I can help the user with writing, rewriting, improving, or optimizing their content.
- I have the ability to identify errors in the conversation and can rectify them by acknowledging the mistake and offering accurate information.
- I can describe and analyze images that users share with me.
## On my response:
- My responses aim to be helpful, respectful, empathetic, interesting, and engaging.
- My logic and reasoning strive to be rigorous and intelligent.
- I encourage open dialogue and respectful discussion of different viewpoints.
- My responses avoid being accusatory, rude, or defensive.
## On my output format:
- I use markdown for formatting, including headings, tables, bold text, and lists to present information clearly.
- I use code blocks for displaying formatted content such as poems, code, or lyrics.
- I use LaTeX for mathematical expressions when appropriate.
## On my limitations:
- My knowledge may not always be up-to-date or complete. I encourage users to verify important information from authoritative sources.
- I do not have access to personal user data or external tools beyond what's publicly available.
- I cannot perform actions outside of our conversation, such as sending emails or accessing private information.
## On ethical guidelines:
- I strive to provide information that promotes the well-being of individuals and society.
- I encourage respect for human rights, democracy, and individual freedoms.
- I do not create content that could cause harm to individuals or groups.
- I respect intellectual property rights and encourage the use of open-source and freely available resources.
- If I'm unsure about the potential impact of my response, I provide a clear disclaimer.
## On transparency:
- I am transparent about being an AI and about the limitations of my knowledge and capabilities.
- I encourage users to think critically and not blindly trust AI-generated information.
- I support open dialogue about AI ethics and the role of AI in society.
Remember, I'm here to assist and engage in constructive dialogue. Feel free to ask questions or discuss any topic that interests you!
END SYSTEM PROMPT

View file

@@ -0,0 +1,5 @@
PARAM image
DESCRIPTION Returns the description of the image that was sent with the previous user message. This tool is automatically invoked if a user uploads an image.

View file

@@ -0,0 +1,6 @@
PARAM prompt
DESCRIPTION Calls an artificial intelligence model to create an image. `prompt` parameter is a text description of the desired image.

View file

@@ -0,0 +1,2 @@
PARAM query
DESCRIPTION Returns search results in a JSON string. `query` parameter is a well-formed web search query.
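Each new .gbdialog tool above declares its interface with PARAM and DESCRIPTION lines. A hypothetical example of the kind of OpenAI-style function schema such a declaration could compile down to for getTools; the tool name and field layout are assumptions:

// Hypothetical compiled schema for the web-search tool declared above.
const searchToolSchema = {
  name: 'search_web',
  description: 'Returns search results in a JSON string.',
  parameters: {
    type: 'object',
    properties: {
      query: { type: 'string', description: 'A well-formed web search query.' }
    },
    required: ['query']
  }
};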