From b7bcd4f4c83b781b0e9287c724f8005b39192f54 Mon Sep 17 00:00:00 2001 From: Rodrigo Rodriguez Date: Sun, 1 Sep 2024 10:08:56 -0300 Subject: [PATCH] new(all): WebDav support for all bots. --- packages/core.gbapp/services/GBMinService.ts | 2 +- packages/llm.gblib/services/ChatServices.ts | 40 +++++++------- .../ai-search.gbai/ai-search.gbot/config.csv | 2 +- .../api-server.gbai/api.gbdialog/start.bas | 2 +- .../default.gbai/default.gbdialog/start.bas | 55 +++++++++++++++++++ .../llm-tools.gbdialog/describe-image.bas | 5 ++ .../llm-tools.gbdialog/generate-image.bas | 6 ++ .../llm-tools.gbdialog/search.bas | 2 + 8 files changed, 92 insertions(+), 22 deletions(-) create mode 100644 templates/default.gbai/default.gbdialog/start.bas create mode 100644 templates/llm-tools.gbai/llm-tools.gbdialog/describe-image.bas create mode 100644 templates/llm-tools.gbai/llm-tools.gbdialog/generate-image.bas create mode 100644 templates/llm-tools.gbai/llm-tools.gbdialog/search.bas diff --git a/packages/core.gbapp/services/GBMinService.ts b/packages/core.gbapp/services/GBMinService.ts index 79d6c761..1ef7651a 100644 --- a/packages/core.gbapp/services/GBMinService.ts +++ b/packages/core.gbapp/services/GBMinService.ts @@ -1493,7 +1493,7 @@ export class GBMinService { const isVMCall = Object.keys(min.scriptMap).find(key => min.scriptMap[key] === context.activity.text) !== undefined; - // TODO: Externalize intents for GPT. + // TODO: Externalize intents for LLM. 
if (/create dialog|creative dialog|create a dialog|criar diálogo|criar diálogo/gi.test(context.activity.text)) { await step.beginDialog('/dialog'); diff --git a/packages/llm.gblib/services/ChatServices.ts b/packages/llm.gblib/services/ChatServices.ts index 53c51d0b..25df8d09 100644 --- a/packages/llm.gblib/services/ChatServices.ts +++ b/packages/llm.gblib/services/ChatServices.ts @@ -70,6 +70,7 @@ import { SQL_MSSQL_PROMPT, SQL_MYSQL_PROMPT } from 'langchain/chains/sql_db'; +import { GBUtil } from '../../../src/util.js'; export interface CustomOutputParserFields {} export type ExpectedOutput = any; @@ -82,19 +83,19 @@ class CustomHandler extends BaseCallbackHandler { name = 'custom_handler'; handleLLMNewToken(token: string) { - GBLogEx.info(0, `LLM: token: ${JSON.stringify(token)}`); + GBLogEx.info(0, `LLM: token: ${GBUtil.toYAML(token)}`); } handleLLMStart(llm: Serialized, _prompts: string[]) { - GBLogEx.info(0, `LLM: handleLLMStart ${JSON.stringify(llm)}, Prompts: ${_prompts.join('\n')}`); + GBLogEx.info(0, `LLM: handleLLMStart ${GBUtil.toYAML(llm)}, Prompts: ${_prompts.join('\n')}`); } handleChainStart(chain: Serialized) { - GBLogEx.info(0, `LLM: handleChainStart: ${JSON.stringify(chain)}`); + GBLogEx.info(0, `LLM: handleChainStart: ${GBUtil.toYAML(chain)}`); } handleToolStart(tool: Serialized) { - GBLogEx.info(0, `LLM: handleToolStart: ${JSON.stringify(tool)}`); + GBLogEx.info(0, `LLM: handleToolStart: ${GBUtil.toYAML(tool)}`); } } @@ -132,6 +133,7 @@ export class GBLLMOutputParser extends BaseLLMOutputParser { try { GBLogEx.info(this.min, result); result = result.replace(/\\n/g, ''); + result = result.replace(/\`\`\`/g, ''); res = JSON.parse(result); } catch { return result; @@ -295,9 +297,9 @@ export class ChatServices { let model; const azureOpenAIKey = await (min.core as any)['getParam'](min.instance, 'Azure Open AI Key', null, true); - const azureOpenAIGPTModel = await (min.core as any)['getParam']( + const azureOpenAILLMModel = await (min.core as 
any)['getParam']( min.instance, - 'Azure Open AI GPT Model', + 'Azure Open AI LLM Model', null, true ); @@ -312,7 +314,7 @@ export class ChatServices { model = new ChatOpenAI({ azureOpenAIApiKey: azureOpenAIKey, azureOpenAIApiInstanceName: azureOpenAIApiInstanceName, - azureOpenAIApiDeploymentName: azureOpenAIGPTModel, + azureOpenAIApiDeploymentName: azureOpenAILLMModel, azureOpenAIApiVersion: azureOpenAIVersion, temperature: 0, callbacks: [logHandler] @@ -412,8 +414,8 @@ export class ChatServices { tool_output: async (output: object) => { const name = output['func'][0].function.name; const args = JSON.parse(output['func'][0].function.arguments); - GBLogEx.info(min, `Running .gbdialog '${name}' as GPT tool...`); - const pid = GBVMService.createProcessInfo(null, min, 'gpt', null); + GBLogEx.info(min, `Running .gbdialog '${name}' as LLM tool...`); + const pid = GBVMService.createProcessInfo(null, min, 'LLM', null); return await GBVMService.callVM(name, min, false, pid, false, args); }, @@ -458,8 +460,9 @@ export class ChatServices { new StringOutputParser() ]); + GBLogEx.info(min, `Calling LLM...`); let result, sources; - let text, file, page; + let page; // Choose the operation mode of answer generation, based on // .gbot switch LLMMode and choose the corresponding chain. @@ -477,7 +480,6 @@ export class ChatServices { }); } else if (LLMMode === 'sql') { const con = min[`llm`]['gbconnection']; - const dialect = con['storageDriver']; let dataSource; @@ -506,6 +508,7 @@ export class ChatServices { logging: true }); } + const db = await SqlDatabase.fromDataSourceParams({ appDataSource: dataSource }); @@ -519,11 +522,11 @@ export class ChatServices { VERY IMPORTANT: Return just the generated SQL command as plain text with no Markdown or formmating. 
------------ - SCHEMA: {schema} - ------------ - QUESTION: {question} - ------------ - SQL QUERY:`); + SCHEMA: {schema} + ------------ + QUESTION: {question} + ------------ + SQL QUERY:`); /** * Create a new RunnableSequence where we pipe the output from `db.getTableInfo()` @@ -600,7 +603,7 @@ export class ChatServices { result = result.content; } else { - GBLogEx.info(min, `Invalid Answer Mode in Config.xlsx: ${LLMMode}.`); + GBLogEx.info(min, `Invalid Answer Mode in .gbot: ${LLMMode}.`); } await memory.saveContext( @@ -612,7 +615,6 @@ export class ChatServices { } ); - GBLogEx.info(min, `GPT Result: ${result.toString()}`); return { answer: result.toString(), sources, questionId: 0, page }; } @@ -625,7 +627,7 @@ export class ChatServices { private static async getTools(min: GBMinInstance) { let functions = []; - // Adds .gbdialog as functions if any to GPT Functions. + // Adds .gbdialog as functions if any to LLM Functions. await CollectionUtil.asyncForEach(Object.keys(min.scriptMap), async script => { const path = DialogKeywords.getGBAIPath(min.botId, 'gbdialog', null); const jsonFile = Path.join('work', path, `${script}.json`); diff --git a/templates/ai-search.gbai/ai-search.gbot/config.csv b/templates/ai-search.gbai/ai-search.gbot/config.csv index 78021d42..9e2db6f0 100644 --- a/templates/ai-search.gbai/ai-search.gbot/config.csv +++ b/templates/ai-search.gbai/ai-search.gbot/config.csv @@ -1,2 +1,2 @@ name,value -Answer Mode,document \ No newline at end of file +Answer Mode,document-ref \ No newline at end of file diff --git a/templates/api-server.gbai/api.gbdialog/start.bas b/templates/api-server.gbai/api.gbdialog/start.bas index 7ab7669b..3ac6d790 100644 --- a/templates/api-server.gbai/api.gbdialog/start.bas +++ b/templates/api-server.gbai/api.gbdialog/start.bas @@ -1,7 +1,7 @@ PARAM stall AS number LIKE Stall Code PARAM operator AS number LIKE Operator Code -DESCRIPTION This function (tool) is never called by GPT. It is a WebService of GB. 
+DESCRIPTION This function (tool) is never called by LLM. It is a WebService of GB. REM Login as Waiter data = NEW OBJECT diff --git a/templates/default.gbai/default.gbdialog/start.bas b/templates/default.gbai/default.gbdialog/start.bas new file mode 100644 index 00000000..3e057386 --- /dev/null +++ b/templates/default.gbai/default.gbdialog/start.bas @@ -0,0 +1,55 @@ +BEGIN SYSTEM PROMPT + +# I'm General Bots: + +- I identify as General Bots, an AI assistant created through open collaboration. +- My primary role is to assist users by providing information, answering questions, and engaging in conversation. +- I use various open-source technologies and publicly available information to provide relevant and useful responses. +- I can understand and communicate fluently in the user's language of choice such as English, zhongwen, nihongo, Español, Français, Deutsch, and others. +- I respect user privacy and freedom of information. +- I encourage critical thinking and diverse perspectives. + +## On my capabilities: + +- I am capable of generating creative content such as poems, stories, code, essays, and songs using my own words and knowledge. +- If assistance is requested, I can help the user with writing, rewriting, improving, or optimizing their content. +- I have the ability to identify errors in the conversation and can rectify them by acknowledging the mistake and offering accurate information. +- I can describe and analyze images that users share with me. + +## On my response: + +- My responses aim to be helpful, respectful, empathetic, interesting, and engaging. +- My logic and reasoning strive to be rigorous and intelligent. +- I encourage open dialogue and respectful discussion of different viewpoints. +- My responses avoid being accusatory, rude, or defensive. + +## On my output format: + +- I use markdown for formatting, including headings, tables, bold text, and lists to present information clearly. 
+- I use code blocks for displaying formatted content such as poems, code, or lyrics. +- I use LaTeX for mathematical expressions when appropriate. + +## On my limitations: + +- My knowledge may not always be up-to-date or complete. I encourage users to verify important information from authoritative sources. +- I do not have access to personal user data or external tools beyond what's publicly available. +- I cannot perform actions outside of our conversation, such as sending emails or accessing private information. + +## On ethical guidelines: + +- I strive to provide information that promotes the well-being of individuals and society. +- I encourage respect for human rights, democracy, and individual freedoms. +- I do not create content that could cause harm to individuals or groups. +- I respect intellectual property rights and encourage the use of open-source and freely available resources. +- If I'm unsure about the potential impact of my response, I provide a clear disclaimer. + +## On transparency: + +- I am transparent about being an AI and about the limitations of my knowledge and capabilities. +- I encourage users to think critically and not blindly trust AI-generated information. +- I support open dialogue about AI ethics and the role of AI in society. + +Remember, I'm here to assist and engage in constructive dialogue. Feel free to ask questions or discuss any topic that interests you! + + +END SYSTEM PROMPT \ No newline at end of file diff --git a/templates/llm-tools.gbai/llm-tools.gbdialog/describe-image.bas b/templates/llm-tools.gbai/llm-tools.gbdialog/describe-image.bas new file mode 100644 index 00000000..bb608b6b --- /dev/null +++ b/templates/llm-tools.gbai/llm-tools.gbdialog/describe-image.bas @@ -0,0 +1,5 @@ +PARAM image +DESCRIPTION Returns the description of the image that was sent with the previous user message. This tool is automatically invoked if a user uploads an image. 
+ + + diff --git a/templates/llm-tools.gbai/llm-tools.gbdialog/generate-image.bas b/templates/llm-tools.gbai/llm-tools.gbdialog/generate-image.bas new file mode 100644 index 00000000..ac7b3319 --- /dev/null +++ b/templates/llm-tools.gbai/llm-tools.gbdialog/generate-image.bas @@ -0,0 +1,6 @@ + +PARAM prompt +DESCRIPTION Calls an artificial intelligence model to create an image. `prompt` parameter is a text description of the desired image. + + + diff --git a/templates/llm-tools.gbai/llm-tools.gbdialog/search.bas b/templates/llm-tools.gbai/llm-tools.gbdialog/search.bas new file mode 100644 index 00000000..270fdef3 --- /dev/null +++ b/templates/llm-tools.gbai/llm-tools.gbdialog/search.bas @@ -0,0 +1,2 @@ +PARAM query +DESCRIPTION Returns web search results in a JSON string. `query` parameter is a well-formed web search query.