feat: 代码梳理：移除所有敏感密钥，通过环境变量方式配置
All checks were successful
git commit AiDA python develop 分支构建部署 / scheduled_deploy (push) Has been skipped

This commit is contained in:
zcr
2025-12-30 16:49:08 +08:00
parent 1be716e414
commit 18024a2d70
167 changed files with 5283 additions and 10464 deletions

View File

@@ -3,27 +3,20 @@ import json
import logging
from typing import Any, Dict, List, Optional, Union, Tuple
from langchain.agents import AgentExecutor
from langchain.callbacks.manager import Callbacks, CallbackManager
from langchain.load.dump import dumpd
from langchain.schema import RUN_KEY, RunInfo
from langchain_classic.agents import AgentExecutor
from langchain_classic.schema import RUN_KEY
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import Callbacks, CallbackManager
from langchain_core.load import dumpd
from langchain_core.outputs import RunInfo
class CustomAgentExecutor(AgentExecutor):
def __call__(
self,
inputs: Union[Dict[str, Any], Any],
return_only_outputs: bool = False,
callbacks: Callbacks = None,
session_key: str = "",
*,
tags: Optional[List[str]] = None,
include_run_info: bool = False,
) -> Dict[str, Any]:
def __call__(self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Callbacks = None, session_key: str = "", *, tags: Optional[List[str]] = None, include_run_info: bool = False, **kwargs) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
**kwargs:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
@@ -72,7 +65,7 @@ class CustomAgentExecutor(AgentExecutor):
"""Validate and prep outputs."""
self._validate_outputs(outputs)
if self.memory is not None and outputs['need_record']:
self.memory.save_context(inputs, outputs, session_key)
self.memory.save_context(inputs, outputs)
if return_only_outputs:
return outputs
else:
@@ -95,7 +88,7 @@ class CustomAgentExecutor(AgentExecutor):
)
inputs = {list(_input_keys)[0]: inputs}
if self.memory is not None:
external_context = self.memory.load_memory_variables(inputs, session_key)
external_context = self.memory.load_memory_variables(inputs)
inputs = dict(inputs, **external_context)
self._validate_inputs(inputs)
return inputs
@@ -119,7 +112,8 @@ class CustomAgentExecutor(AgentExecutor):
{return_value_key: observation},
"",
)
except:
except Exception as e:
print(e)
pass
# Invalid tools won't be in the map, so we return False.

View File

@@ -1,26 +1,15 @@
import json
import re
from dataclasses import dataclass
from json import JSONDecodeError
from typing import List, Tuple, Any, Union
from dataclasses import dataclass
from langchain.callbacks.manager import Callbacks
from langchain.agents import (
OpenAIFunctionsAgent,
)
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
OutputParserException
)
from langchain.schema.messages import (
AIMessage,
FunctionMessage
)
from langchain.tools import BaseTool, StructuredTool
# from langchain.tools.convert_to_openai import FunctionDescription
from langchain.utils.openai_functions import FunctionDescription
from langchain_classic.agents import OpenAIFunctionsAgent
from langchain_community.utils.ernie_functions import FunctionDescription
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import Callbacks
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import BaseMessage, AIMessage, FunctionMessage
from langchain_core.tools import BaseTool
@dataclass
@@ -76,7 +65,6 @@ def _create_function_message(
content = observation
return FunctionMessage(
name=agent_action.tool,
content=content,
)
@@ -177,6 +165,7 @@ class ConversationalFunctionsAgent(OpenAIFunctionsAgent):
into it.
Args:
callbacks:
intermediate_steps: Steps the LLM has taken to date, along with observations
**kwargs: User inputs.
**kwargs: Including user's input string