index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos/langchain/libs/langchain/tests/unit_tests/smith
lc_public_repos/langchain/libs/langchain/tests/unit_tests/smith/evaluation/test_string_run_evaluator.py
"""Tests for the string run evaluator.""" from unittest.mock import MagicMock from langchain.evaluation import criteria from langchain.smith.evaluation.string_run_evaluator import ( ChainStringRunMapper, StringRunEvaluatorChain, ) from tests.unit_tests.llms import fake_llm def test_evaluate_run() -> None: run_mapper = ChainStringRunMapper() string_evaluator = criteria.CriteriaEvalChain.from_llm(fake_llm.FakeLLM()) evaluator = StringRunEvaluatorChain( run_mapper=run_mapper, example_mapper=None, name="test_evaluator", string_evaluator=string_evaluator, ) run = MagicMock() example = MagicMock() res = evaluator.evaluate_run(run, example) assert str(res.comment).startswith("Error evaluating run ") assert res.key == string_evaluator.evaluation_name
0
lc_public_repos/langchain/libs/langchain/tests/unit_tests/smith
lc_public_repos/langchain/libs/langchain/tests/unit_tests/smith/evaluation/test_runner_utils.py
"""Test the LangSmith evaluation helpers.""" import uuid from datetime import datetime from typing import Any, Dict, Iterator, List, Optional, Union from unittest import mock import pytest from freezegun import freeze_time from langchain_core.language_models import BaseLanguageModel from langsmith.client import Client from langsmith.schemas import Dataset, Example from langchain.chains.base import Chain from langchain.chains.transform import TransformChain from langchain.smith.evaluation.runner_utils import ( InputFormatError, _get_messages, _get_prompt, _run_llm, _run_llm_or_chain, _validate_example_inputs_for_chain, _validate_example_inputs_for_language_model, arun_on_dataset, ) from tests.unit_tests.llms.fake_chat_model import FakeChatModel from tests.unit_tests.llms.fake_llm import FakeLLM _CREATED_AT = datetime(2015, 1, 1, 0, 0, 0) _TENANT_ID = "7a3d2b56-cd5b-44e5-846f-7eb6e8144ce4" _EXAMPLE_MESSAGE = { "data": {"content": "Foo", "example": False, "additional_kwargs": {}}, "type": "human", } _VALID_MESSAGES = [ {"messages": [_EXAMPLE_MESSAGE], "other_key": "value"}, {"messages": [], "other_key": "value"}, { "messages": [[_EXAMPLE_MESSAGE, _EXAMPLE_MESSAGE]], "other_key": "value", }, {"any_key": [_EXAMPLE_MESSAGE]}, {"any_key": [[_EXAMPLE_MESSAGE, _EXAMPLE_MESSAGE]]}, ] _VALID_PROMPTS = [ {"prompts": ["foo"], "other_key": "value"}, {"prompt": "foo", "other_key": ["bar", "baz"]}, {"some_key": "foo"}, {"some_key": ["foo"]}, ] _INVALID_PROMPTS = ( [ {"prompts": "foo"}, {"prompt": ["foo"]}, {"some_key": 3}, {"some_key": "foo", "other_key": "bar"}, ], ) @pytest.mark.parametrize( "inputs", _VALID_MESSAGES, ) def test__get_messages_valid(inputs: Dict[str, Any]) -> None: {"messages": []} _get_messages(inputs) @pytest.mark.parametrize( "inputs", _VALID_PROMPTS, ) def test__get_prompts_valid(inputs: Dict[str, Any]) -> None: _get_prompt(inputs) @pytest.mark.parametrize( "inputs", _VALID_PROMPTS, ) def test__validate_example_inputs_for_language_model(inputs: Dict[str, 
Any]) -> None: mock_ = mock.MagicMock() mock_.inputs = inputs _validate_example_inputs_for_language_model(mock_, None) @pytest.mark.parametrize( "inputs", _INVALID_PROMPTS, ) def test__validate_example_inputs_for_language_model_invalid( inputs: Dict[str, Any], ) -> None: mock_ = mock.MagicMock() mock_.inputs = inputs with pytest.raises(InputFormatError): _validate_example_inputs_for_language_model(mock_, None) def test__validate_example_inputs_for_chain_single_input() -> None: mock_ = mock.MagicMock() mock_.inputs = {"foo": "bar"} chain = mock.MagicMock() chain.input_keys = ["def not foo"] _validate_example_inputs_for_chain(mock_, chain, None) def test__validate_example_inputs_for_chain_input_mapper() -> None: mock_ = mock.MagicMock() mock_.inputs = {"foo": "bar", "baz": "qux"} chain = mock.MagicMock() chain.input_keys = ["not foo", "not baz", "not qux"] def wrong_output_format(inputs: dict) -> str: assert "foo" in inputs assert "baz" in inputs return "hehe" with pytest.raises(InputFormatError, match="must be a dictionary"): _validate_example_inputs_for_chain(mock_, chain, wrong_output_format) def wrong_output_keys(inputs: dict) -> dict: assert "foo" in inputs assert "baz" in inputs return {"not foo": "foo", "not baz": "baz"} with pytest.raises(InputFormatError, match="Missing keys after loading example"): _validate_example_inputs_for_chain(mock_, chain, wrong_output_keys) def input_mapper(inputs: dict) -> dict: assert "foo" in inputs assert "baz" in inputs return {"not foo": inputs["foo"], "not baz": inputs["baz"], "not qux": "qux"} _validate_example_inputs_for_chain(mock_, chain, input_mapper) def test__validate_example_inputs_for_chain_multi_io() -> None: mock_ = mock.MagicMock() mock_.inputs = {"foo": "bar", "baz": "qux"} chain = mock.MagicMock() chain.input_keys = ["foo", "baz"] _validate_example_inputs_for_chain(mock_, chain, None) def test__validate_example_inputs_for_chain_single_input_multi_expect() -> None: mock_ = mock.MagicMock() mock_.inputs = {"foo": 
"bar"} chain = mock.MagicMock() chain.input_keys = ["def not foo", "oh here is another"] with pytest.raises(InputFormatError, match="Example inputs missing expected"): _validate_example_inputs_for_chain(mock_, chain, None) @pytest.mark.parametrize("inputs", _INVALID_PROMPTS) def test__get_prompts_invalid(inputs: Dict[str, Any]) -> None: with pytest.raises(InputFormatError): _get_prompt(inputs) def test_run_llm_or_chain_with_input_mapper() -> None: example = Example( id=uuid.uuid4(), created_at=_CREATED_AT, inputs={"the wrong input": "1", "another key": "2"}, outputs={"output": "2"}, dataset_id=str(uuid.uuid4()), ) def run_val(inputs: dict) -> dict: assert "the right input" in inputs return {"output": "2"} mock_chain = TransformChain( # type: ignore[call-arg] input_variables=["the right input"], output_variables=["output"], transform=run_val, ) def input_mapper(inputs: dict) -> dict: assert "the wrong input" in inputs return {"the right input": inputs["the wrong input"]} result = _run_llm_or_chain( example, {"callbacks": [], "tags": []}, llm_or_chain_factory=lambda: mock_chain, input_mapper=input_mapper, ) assert result == {"output": "2", "the right input": "1"} bad_result = _run_llm_or_chain( example, {"callbacks": [], "tags": []}, llm_or_chain_factory=lambda: mock_chain ) assert "Error" in bad_result # Try with LLM def llm_input_mapper(inputs: dict) -> str: assert "the wrong input" in inputs return "the right input" mock_llm = FakeLLM(queries={"the right input": "somenumber"}) llm_result = _run_llm_or_chain( example, {"callbacks": [], "tags": []}, llm_or_chain_factory=mock_llm, input_mapper=llm_input_mapper, ) assert isinstance(llm_result, str) assert llm_result == "somenumber" @pytest.mark.parametrize( "inputs", [ {"one_key": [_EXAMPLE_MESSAGE], "other_key": "value"}, { "messages": [[_EXAMPLE_MESSAGE, _EXAMPLE_MESSAGE], _EXAMPLE_MESSAGE], "other_key": "value", }, {"prompts": "foo"}, {}, ], ) def test__get_messages_invalid(inputs: Dict[str, Any]) -> None: with 
pytest.raises(InputFormatError): _get_messages(inputs) @pytest.mark.parametrize("inputs", _VALID_PROMPTS + _VALID_MESSAGES) def test_run_llm_all_formats(inputs: Dict[str, Any]) -> None: llm = FakeLLM() _run_llm(llm, inputs, mock.MagicMock()) @pytest.mark.parametrize("inputs", _VALID_MESSAGES + _VALID_PROMPTS) def test_run_chat_model_all_formats(inputs: Dict[str, Any]) -> None: llm = FakeChatModel() _run_llm(llm, inputs, mock.MagicMock()) @freeze_time("2023-01-01") async def test_arun_on_dataset(monkeypatch: pytest.MonkeyPatch) -> None: dataset = Dataset( id=uuid.uuid4(), name="test", description="Test dataset", owner_id="owner", created_at=_CREATED_AT, tenant_id=_TENANT_ID, _host_url="http://localhost:1984", ) uuids = [ "0c193153-2309-4704-9a47-17aee4fb25c8", "0d11b5fd-8e66-4485-b696-4b55155c0c05", "90d696f0-f10d-4fd0-b88b-bfee6df08b84", "4ce2c6d8-5124-4c0c-8292-db7bdebcf167", "7b5a524c-80fa-4960-888e-7d380f9a11ee", ] examples = [ Example( id=uuids[0], created_at=_CREATED_AT, inputs={"input": "1"}, outputs={"output": "2"}, dataset_id=str(uuid.uuid4()), ), Example( id=uuids[1], created_at=_CREATED_AT, inputs={"input": "3"}, outputs={"output": "4"}, dataset_id=str(uuid.uuid4()), ), Example( id=uuids[2], created_at=_CREATED_AT, inputs={"input": "5"}, outputs={"output": "6"}, dataset_id=str(uuid.uuid4()), ), Example( id=uuids[3], created_at=_CREATED_AT, inputs={"input": "7"}, outputs={"output": "8"}, dataset_id=str(uuid.uuid4()), ), Example( id=uuids[4], created_at=_CREATED_AT, inputs={"input": "9"}, outputs={"output": "10"}, dataset_id=str(uuid.uuid4()), ), ] def mock_read_dataset(*args: Any, **kwargs: Any) -> Dataset: return dataset def mock_list_examples(*args: Any, **kwargs: Any) -> Iterator[Example]: return iter(examples) async def mock_arun_chain( example: Example, llm_or_chain: Union[BaseLanguageModel, Chain], tags: Optional[List[str]] = None, callbacks: Optional[Any] = None, **kwargs: Any, ) -> Dict[str, Any]: return {"result": f"Result for example 
{example.id}"} def mock_create_project(*args: Any, **kwargs: Any) -> Any: proj = mock.MagicMock() proj.id = "123" return proj with mock.patch.object( Client, "read_dataset", new=mock_read_dataset ), mock.patch.object(Client, "list_examples", new=mock_list_examples), mock.patch( "langchain.smith.evaluation.runner_utils._arun_llm_or_chain", new=mock_arun_chain, ), mock.patch.object(Client, "create_project", new=mock_create_project): client = Client(api_url="http://localhost:1984", api_key="123") chain = mock.MagicMock() chain.input_keys = ["foothing"] results = await arun_on_dataset( dataset_name="test", llm_or_chain_factory=lambda: chain, concurrency_level=2, project_name="test_project", client=client, ) expected = { str(example.id): { "output": { "result": f"Result for example {uuid.UUID(str(example.id))}" }, "input": {"input": example.inputs["input"]}, "reference": { "output": example.outputs["output"] if example.outputs is not None else None }, "feedback": [], # No run since we mock the call to the llm above "execution_time": None, "run_id": None, } for example in examples } assert results["results"] == expected
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/cache.py
"""Deprecated import shims for LLM cache classes.

The implementations live in ``langchain_community.cache``; importing them
from here still works but routes through a deprecating importer.
"""
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.cache import (
        AstraDBCache,
        AstraDBSemanticCache,
        AzureCosmosDBSemanticCache,
        CassandraCache,
        CassandraSemanticCache,
        FullLLMCache,
        FullMd5LLMCache,
        GPTCache,
        InMemoryCache,
        MomentoCache,
        RedisCache,
        RedisSemanticCache,
        SQLAlchemyCache,
        SQLAlchemyMd5Cache,
        SQLiteCache,
        UpstashRedisCache,
    )

# Names re-exported from langchain_community.cache, in their historical
# export order.
_COMMUNITY_CACHE_NAMES = [
    "FullLLMCache",
    "SQLAlchemyCache",
    "SQLiteCache",
    "UpstashRedisCache",
    "RedisCache",
    "RedisSemanticCache",
    "GPTCache",
    "MomentoCache",
    "InMemoryCache",
    "CassandraCache",
    "CassandraSemanticCache",
    "FullMd5LLMCache",
    "SQLAlchemyMd5Cache",
    "AstraDBCache",
    "AstraDBSemanticCache",
    "AzureCosmosDBSemanticCache",
]

# Dynamic lookup table consumed by the importer: each legacy name maps to its
# new home so the deprecation warning can point users there.
DEPRECATED_LOOKUP = {
    name: "langchain_community.cache" for name in _COMMUNITY_CACHE_NAMES
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = list(_COMMUNITY_CACHE_NAMES)
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/requests.py
"""DEPRECATED: Kept for backwards compatibility.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.utilities import ( Requests, RequestsWrapper, TextRequestsWrapper, ) # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optional imports. DEPRECATED_LOOKUP = { "Requests": "langchain_community.utilities", "RequestsWrapper": "langchain_community.utilities", "TextRequestsWrapper": "langchain_community.utilities", } _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) def __getattr__(name: str) -> Any: """Look up attributes dynamically.""" return _import_attribute(name) __all__ = [ "Requests", "RequestsWrapper", "TextRequestsWrapper", ]
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/env.py
import platform
from functools import lru_cache


@lru_cache(maxsize=1)
def get_runtime_environment() -> dict:
    """Get information about the LangChain runtime environment.

    The result is cached: the environment does not change within a process.
    """
    # Imported lazily to avoid a circular import with the langchain package.
    from langchain import __version__

    return dict(
        library_version=__version__,
        library="langchain",
        platform=platform.platform(),
        runtime="python",
        runtime_version=platform.python_version(),
    )
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/hub.py
"""Interface with the LangChain Hub.""" from __future__ import annotations import json from typing import Any, Optional, Sequence from langchain_core.load.dump import dumps from langchain_core.load.load import loads from langchain_core.prompts import BasePromptTemplate def _get_client( api_key: Optional[str] = None, api_url: Optional[str] = None, ) -> Any: try: from langsmith import Client as LangSmithClient ls_client = LangSmithClient(api_url, api_key=api_key) if hasattr(ls_client, "push_prompt") and hasattr(ls_client, "pull_prompt"): return ls_client else: from langchainhub import Client as LangChainHubClient return LangChainHubClient(api_url, api_key=api_key) except ImportError: try: from langchainhub import Client as LangChainHubClient return LangChainHubClient(api_url, api_key=api_key) except ImportError as e: raise ImportError( "Could not import langsmith or langchainhub (deprecated)," "please install with `pip install langsmith`." ) from e def push( repo_full_name: str, object: Any, *, api_url: Optional[str] = None, api_key: Optional[str] = None, parent_commit_hash: Optional[str] = None, new_repo_is_public: bool = False, new_repo_description: Optional[str] = None, readme: Optional[str] = None, tags: Optional[Sequence[str]] = None, ) -> str: """ Push an object to the hub and returns the URL it can be viewed at in a browser. :param repo_full_name: The full name of the prompt to push to in the format of `owner/prompt_name` or `prompt_name`. :param object: The LangChain to serialize and push to the hub. :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service if you have an api key set, or a localhost instance if not. :param api_key: The API key to use to authenticate with the LangChain Hub API. :param parent_commit_hash: The commit hash of the parent commit to push to. Defaults to the latest commit automatically. :param new_repo_is_public: Whether the prompt should be public. Defaults to False (Private by default). 
:param new_repo_description: The description of the prompt. Defaults to an empty string. """ client = _get_client(api_key=api_key, api_url=api_url) # Then it's langsmith if hasattr(client, "push_prompt"): return client.push_prompt( repo_full_name, object=object, parent_commit_hash=parent_commit_hash, is_public=new_repo_is_public, description=new_repo_description, readme=readme, tags=tags, ) # Then it's langchainhub manifest_json = dumps(object) message = client.push( repo_full_name, manifest_json, parent_commit_hash=parent_commit_hash, new_repo_is_public=new_repo_is_public, new_repo_description=new_repo_description, ) return message def pull( owner_repo_commit: str, *, include_model: Optional[bool] = None, api_url: Optional[str] = None, api_key: Optional[str] = None, ) -> Any: """ Pull an object from the hub and returns it as a LangChain object. :param owner_repo_commit: The full name of the prompt to pull from in the format of `owner/prompt_name:commit_hash` or `owner/prompt_name` or just `prompt_name` if it's your own prompt. :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service if you have an api key set, or a localhost instance if not. :param api_key: The API key to use to authenticate with the LangChain Hub API. """ client = _get_client(api_key=api_key, api_url=api_url) # Then it's langsmith if hasattr(client, "pull_prompt"): response = client.pull_prompt(owner_repo_commit, include_model=include_model) return response # Then it's langchainhub if hasattr(client, "pull_repo"): # >= 0.1.15 res_dict = client.pull_repo(owner_repo_commit) obj = loads(json.dumps(res_dict["manifest"])) if isinstance(obj, BasePromptTemplate): if obj.metadata is None: obj.metadata = {} obj.metadata["lc_hub_owner"] = res_dict["owner"] obj.metadata["lc_hub_repo"] = res_dict["repo"] obj.metadata["lc_hub_commit_hash"] = res_dict["commit_hash"] return obj # Then it's < 0.1.15 langchainhub resp: str = client.pull(owner_repo_commit) return loads(resp)
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/text_splitter.py
"""Kept for backwards compatibility.""" from langchain_text_splitters import ( Language, RecursiveCharacterTextSplitter, TextSplitter, Tokenizer, TokenTextSplitter, ) from langchain_text_splitters.base import split_text_on_tokens from langchain_text_splitters.character import CharacterTextSplitter from langchain_text_splitters.html import ElementType, HTMLHeaderTextSplitter from langchain_text_splitters.json import RecursiveJsonSplitter from langchain_text_splitters.konlpy import KonlpyTextSplitter from langchain_text_splitters.latex import LatexTextSplitter from langchain_text_splitters.markdown import ( HeaderType, LineType, MarkdownHeaderTextSplitter, MarkdownTextSplitter, ) from langchain_text_splitters.nltk import NLTKTextSplitter from langchain_text_splitters.python import PythonCodeTextSplitter from langchain_text_splitters.sentence_transformers import ( SentenceTransformersTokenTextSplitter, ) from langchain_text_splitters.spacy import SpacyTextSplitter __all__ = [ "TokenTextSplitter", "TextSplitter", "Tokenizer", "Language", "RecursiveCharacterTextSplitter", "RecursiveJsonSplitter", "LatexTextSplitter", "PythonCodeTextSplitter", "KonlpyTextSplitter", "SpacyTextSplitter", "NLTKTextSplitter", "split_text_on_tokens", "SentenceTransformersTokenTextSplitter", "ElementType", "HeaderType", "LineType", "HTMLHeaderTextSplitter", "MarkdownHeaderTextSplitter", "MarkdownTextSplitter", "CharacterTextSplitter", ]
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/model_laboratory.py
"""Experiment with different models.""" from __future__ import annotations from typing import List, Optional, Sequence from langchain_core.language_models.llms import BaseLLM from langchain_core.prompts.prompt import PromptTemplate from langchain_core.utils.input import get_color_mapping, print_text from langchain.chains.base import Chain from langchain.chains.llm import LLMChain class ModelLaboratory: """Experiment with different models.""" def __init__(self, chains: Sequence[Chain], names: Optional[List[str]] = None): """Initialize with chains to experiment with. Args: chains: list of chains to experiment with. """ for chain in chains: if not isinstance(chain, Chain): raise ValueError( "ModelLaboratory should now be initialized with Chains. " "If you want to initialize with LLMs, use the `from_llms` method " "instead (`ModelLaboratory.from_llms(...)`)" ) if len(chain.input_keys) != 1: raise ValueError( "Currently only support chains with one input variable, " f"got {chain.input_keys}" ) if len(chain.output_keys) != 1: raise ValueError( "Currently only support chains with one output variable, " f"got {chain.output_keys}" ) if names is not None: if len(names) != len(chains): raise ValueError("Length of chains does not match length of names.") self.chains = chains chain_range = [str(i) for i in range(len(self.chains))] self.chain_colors = get_color_mapping(chain_range) self.names = names @classmethod def from_llms( cls, llms: List[BaseLLM], prompt: Optional[PromptTemplate] = None ) -> ModelLaboratory: """Initialize with LLMs to experiment with and optional prompt. Args: llms: list of LLMs to experiment with prompt: Optional prompt to use to prompt the LLMs. Defaults to None. If a prompt was provided, it should only have one input variable. 
""" if prompt is None: prompt = PromptTemplate(input_variables=["_input"], template="{_input}") chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms] names = [str(llm) for llm in llms] return cls(chains, names=names) def compare(self, text: str) -> None: """Compare model outputs on an input text. If a prompt was provided with starting the laboratory, then this text will be fed into the prompt. If no prompt was provided, then the input text is the entire prompt. Args: text: input text to run all models on. """ print(f"\033[1mInput:\033[0m\n{text}\n") # noqa: T201 for i, chain in enumerate(self.chains): if self.names is not None: name = self.names[i] else: name = str(chain) print_text(name, end="\n") output = chain.run(text) print_text(output, color=self.chain_colors[str(i)], end="\n\n")
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/sql_database.py
"""Keep here for backwards compatibility.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.utilities import SQLDatabase # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optional imports. DEPRECATED_LOOKUP = {"SQLDatabase": "langchain_community.utilities"} _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) def __getattr__(name: str) -> Any: """Look up attributes dynamically.""" return _import_attribute(name) __all__ = [ "SQLDatabase", ]
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/python.py
"""For backwards compatibility.""" from typing import Any from langchain._api import create_importer # Code has been removed from the community package as well. # We'll proxy to community package, which will raise an appropriate exception, # but we'll not include this in __all__, so it won't be listed as importable. _importer = create_importer( __package__, deprecated_lookups={"PythonREPL": "langchain_community.utilities.python"}, ) def __getattr__(name: str) -> Any: """Look up attributes dynamically.""" return _importer(name)
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/example_generator.py
"""Keep here for backwards compatibility.""" from langchain.chains.example_generator import generate_example __all__ = ["generate_example"]
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/formatting.py
"""DEPRECATED: Kept for backwards compatibility.""" from langchain_core.utils.formatting import StrictFormatter, formatter __all__ = ["StrictFormatter", "formatter"]
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/input.py
"""DEPRECATED: Kept for backwards compatibility.""" from langchain_core.utils.input import ( get_bolded_text, get_color_mapping, get_colored_text, print_text, ) __all__ = [ "get_bolded_text", "get_color_mapping", "get_colored_text", "print_text", ]
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/globals.py
"""Global values and configuration that apply to all of LangChain.""" import warnings from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from langchain_core.caches import BaseCache # DO NOT USE THESE VALUES DIRECTLY! # Use them only via `get_<X>()` and `set_<X>()` below, # or else your code may behave unexpectedly with other uses of these global settings: # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 _verbose: bool = False _debug: bool = False _llm_cache: Optional["BaseCache"] = None def set_verbose(value: bool) -> None: """Set a new value for the `verbose` global setting.""" import langchain # We're about to run some deprecated code, don't report warnings from it. # The user called the correct (non-deprecated) code path and shouldn't get warnings. with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=( "Importing verbose from langchain root module is no longer supported" ), ) # N.B.: This is a workaround for an unfortunate quirk of Python's # module-level `__getattr__()` implementation: # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 # # Remove it once `langchain.verbose` is no longer supported, and once all users # have migrated to using `set_verbose()` here. langchain.verbose = value global _verbose _verbose = value def get_verbose() -> bool: """Get the value of the `verbose` global setting.""" import langchain # We're about to run some deprecated code, don't report warnings from it. # The user called the correct (non-deprecated) code path and shouldn't get warnings. 
with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=( "Importing verbose from langchain root module is no longer supported" ), ) # N.B.: This is a workaround for an unfortunate quirk of Python's # module-level `__getattr__()` implementation: # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 # # Remove it once `langchain.verbose` is no longer supported, and once all users # have migrated to using `set_verbose()` here. # # In the meantime, the `verbose` setting is considered True if either the old # or the new value are True. This accommodates users who haven't migrated # to using `set_verbose()` yet. Those users are getting deprecation warnings # directing them to use `set_verbose()` when they import `langhchain.verbose`. old_verbose = langchain.verbose global _verbose return _verbose or old_verbose def set_debug(value: bool) -> None: """Set a new value for the `debug` global setting.""" import langchain # We're about to run some deprecated code, don't report warnings from it. # The user called the correct (non-deprecated) code path and shouldn't get warnings. with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="Importing debug from langchain root module is no longer supported", ) # N.B.: This is a workaround for an unfortunate quirk of Python's # module-level `__getattr__()` implementation: # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 # # Remove it once `langchain.debug` is no longer supported, and once all users # have migrated to using `set_debug()` here. langchain.debug = value global _debug _debug = value def get_debug() -> bool: """Get the value of the `debug` global setting.""" import langchain # We're about to run some deprecated code, don't report warnings from it. # The user called the correct (non-deprecated) code path and shouldn't get warnings. 
with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="Importing debug from langchain root module is no longer supported", ) # N.B.: This is a workaround for an unfortunate quirk of Python's # module-level `__getattr__()` implementation: # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 # # Remove it once `langchain.debug` is no longer supported, and once all users # have migrated to using `set_debug()` here. # # In the meantime, the `debug` setting is considered True if either the old # or the new value are True. This accommodates users who haven't migrated # to using `set_debug()` yet. Those users are getting deprecation warnings # directing them to use `set_debug()` when they import `langhchain.debug`. old_debug = langchain.debug global _debug return _debug or old_debug def set_llm_cache(value: Optional["BaseCache"]) -> None: """Set a new LLM cache, overwriting the previous value, if any.""" import langchain # We're about to run some deprecated code, don't report warnings from it. # The user called the correct (non-deprecated) code path and shouldn't get warnings. with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=( "Importing llm_cache from langchain root module is no longer supported" ), ) # N.B.: This is a workaround for an unfortunate quirk of Python's # module-level `__getattr__()` implementation: # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 # # Remove it once `langchain.llm_cache` is no longer supported, and # once all users have migrated to using `set_llm_cache()` here. langchain.llm_cache = value global _llm_cache _llm_cache = value def get_llm_cache() -> "BaseCache": """Get the value of the `llm_cache` global setting.""" import langchain # We're about to run some deprecated code, don't report warnings from it. # The user called the correct (non-deprecated) code path and shouldn't get warnings. 
with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=( "Importing llm_cache from langchain root module is no longer supported" ), ) # N.B.: This is a workaround for an unfortunate quirk of Python's # module-level `__getattr__()` implementation: # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 # # Remove it once `langchain.llm_cache` is no longer supported, and # once all users have migrated to using `set_llm_cache()` here. # # In the meantime, the `llm_cache` setting returns whichever of # its two backing sources is truthy (not `None` and non-empty), # or the old value if both are falsy. This accommodates users # who haven't migrated to using `set_llm_cache()` yet. # Those users are getting deprecation warnings directing them # to use `set_llm_cache()` when they import `langhchain.llm_cache`. old_llm_cache = langchain.llm_cache global _llm_cache return _llm_cache or old_llm_cache
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/base_language.py
"""Deprecated module for BaseLanguageModel class, kept for backwards compatibility.""" from __future__ import annotations from langchain_core.language_models import BaseLanguageModel __all__ = ["BaseLanguageModel"]
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/__init__.py
# ruff: noqa: E402
"""Main entrypoint into package."""
import warnings
from importlib import metadata
from typing import Any, Optional

from langchain_core._api.deprecation import surface_langchain_deprecation_warnings

try:
    __version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
    # Package metadata unavailable (e.g. running from a raw source tree).
    __version__ = ""
del metadata  # keep `dir(__package__)` free of the helper module


def _warn_on_import(name: str, replacement: Optional[str] = None) -> None:
    """Emit a deprecation warning for an attribute imported from the root module.

    Args:
        name: attribute that was imported from the ``langchain`` root.
        replacement: fully qualified location users should import from instead.
    """
    from langchain._api.interactive_env import is_interactive_env

    if is_interactive_env():
        # Stay quiet in REPL/notebook sessions, where attribute
        # auto-completion can trigger this warning even though the user is
        # not actually using a deprecated module.
        return

    message = f"Importing {name} from langchain root module is no longer supported."
    if replacement:
        message += f" Please use {replacement} instead."
    warnings.warn(message)


# Surfaces Deprecation and Pending Deprecation warnings from langchain.
surface_langchain_deprecation_warnings() def __getattr__(name: str) -> Any: if name == "MRKLChain": from langchain.agents import MRKLChain _warn_on_import(name, replacement="langchain.agents.MRKLChain") return MRKLChain elif name == "ReActChain": from langchain.agents import ReActChain _warn_on_import(name, replacement="langchain.agents.ReActChain") return ReActChain elif name == "SelfAskWithSearchChain": from langchain.agents import SelfAskWithSearchChain _warn_on_import(name, replacement="langchain.agents.SelfAskWithSearchChain") return SelfAskWithSearchChain elif name == "ConversationChain": from langchain.chains import ConversationChain _warn_on_import(name, replacement="langchain.chains.ConversationChain") return ConversationChain elif name == "LLMBashChain": raise ImportError( "This module has been moved to langchain-experimental. " "For more details: " "https://github.com/langchain-ai/langchain/discussions/11352." "To access this code, install it with `pip install langchain-experimental`." 
"`from langchain_experimental.llm_bash.base " "import LLMBashChain`" ) elif name == "LLMChain": from langchain.chains import LLMChain _warn_on_import(name, replacement="langchain.chains.LLMChain") return LLMChain elif name == "LLMCheckerChain": from langchain.chains import LLMCheckerChain _warn_on_import(name, replacement="langchain.chains.LLMCheckerChain") return LLMCheckerChain elif name == "LLMMathChain": from langchain.chains import LLMMathChain _warn_on_import(name, replacement="langchain.chains.LLMMathChain") return LLMMathChain elif name == "QAWithSourcesChain": from langchain.chains import QAWithSourcesChain _warn_on_import(name, replacement="langchain.chains.QAWithSourcesChain") return QAWithSourcesChain elif name == "VectorDBQA": from langchain.chains import VectorDBQA _warn_on_import(name, replacement="langchain.chains.VectorDBQA") return VectorDBQA elif name == "VectorDBQAWithSourcesChain": from langchain.chains import VectorDBQAWithSourcesChain _warn_on_import(name, replacement="langchain.chains.VectorDBQAWithSourcesChain") return VectorDBQAWithSourcesChain elif name == "InMemoryDocstore": from langchain_community.docstore import InMemoryDocstore _warn_on_import(name, replacement="langchain.docstore.InMemoryDocstore") return InMemoryDocstore elif name == "Wikipedia": from langchain_community.docstore import Wikipedia _warn_on_import(name, replacement="langchain.docstore.Wikipedia") return Wikipedia elif name == "Anthropic": from langchain_community.llms import Anthropic _warn_on_import(name, replacement="langchain_community.llms.Anthropic") return Anthropic elif name == "Banana": from langchain_community.llms import Banana _warn_on_import(name, replacement="langchain_community.llms.Banana") return Banana elif name == "CerebriumAI": from langchain_community.llms import CerebriumAI _warn_on_import(name, replacement="langchain_community.llms.CerebriumAI") return CerebriumAI elif name == "Cohere": from langchain_community.llms import Cohere 
_warn_on_import(name, replacement="langchain_community.llms.Cohere") return Cohere elif name == "ForefrontAI": from langchain_community.llms import ForefrontAI _warn_on_import(name, replacement="langchain_community.llms.ForefrontAI") return ForefrontAI elif name == "GooseAI": from langchain_community.llms import GooseAI _warn_on_import(name, replacement="langchain_community.llms.GooseAI") return GooseAI elif name == "HuggingFaceHub": from langchain_community.llms import HuggingFaceHub _warn_on_import(name, replacement="langchain_community.llms.HuggingFaceHub") return HuggingFaceHub elif name == "HuggingFaceTextGenInference": from langchain_community.llms import HuggingFaceTextGenInference _warn_on_import( name, replacement="langchain_community.llms.HuggingFaceTextGenInference" ) return HuggingFaceTextGenInference elif name == "LlamaCpp": from langchain_community.llms import LlamaCpp _warn_on_import(name, replacement="langchain_community.llms.LlamaCpp") return LlamaCpp elif name == "Modal": from langchain_community.llms import Modal _warn_on_import(name, replacement="langchain_community.llms.Modal") return Modal elif name == "OpenAI": from langchain_community.llms import OpenAI _warn_on_import(name, replacement="langchain_community.llms.OpenAI") return OpenAI elif name == "Petals": from langchain_community.llms import Petals _warn_on_import(name, replacement="langchain_community.llms.Petals") return Petals elif name == "PipelineAI": from langchain_community.llms import PipelineAI _warn_on_import(name, replacement="langchain_community.llms.PipelineAI") return PipelineAI elif name == "SagemakerEndpoint": from langchain_community.llms import SagemakerEndpoint _warn_on_import(name, replacement="langchain_community.llms.SagemakerEndpoint") return SagemakerEndpoint elif name == "StochasticAI": from langchain_community.llms import StochasticAI _warn_on_import(name, replacement="langchain_community.llms.StochasticAI") return StochasticAI elif name == "Writer": from 
langchain_community.llms import Writer _warn_on_import(name, replacement="langchain_community.llms.Writer") return Writer elif name == "HuggingFacePipeline": from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline _warn_on_import( name, replacement="langchain_community.llms.huggingface_pipeline.HuggingFacePipeline", ) return HuggingFacePipeline elif name == "FewShotPromptTemplate": from langchain_core.prompts import FewShotPromptTemplate _warn_on_import( name, replacement="langchain_core.prompts.FewShotPromptTemplate" ) return FewShotPromptTemplate elif name == "Prompt": from langchain_core.prompts import PromptTemplate _warn_on_import(name, replacement="langchain_core.prompts.PromptTemplate") # it's renamed as prompt template anyways # this is just for backwards compat return PromptTemplate elif name == "PromptTemplate": from langchain_core.prompts import PromptTemplate _warn_on_import(name, replacement="langchain_core.prompts.PromptTemplate") return PromptTemplate elif name == "BasePromptTemplate": from langchain_core.prompts import BasePromptTemplate _warn_on_import(name, replacement="langchain_core.prompts.BasePromptTemplate") return BasePromptTemplate elif name == "ArxivAPIWrapper": from langchain_community.utilities import ArxivAPIWrapper _warn_on_import( name, replacement="langchain_community.utilities.ArxivAPIWrapper" ) return ArxivAPIWrapper elif name == "GoldenQueryAPIWrapper": from langchain_community.utilities import GoldenQueryAPIWrapper _warn_on_import( name, replacement="langchain_community.utilities.GoldenQueryAPIWrapper" ) return GoldenQueryAPIWrapper elif name == "GoogleSearchAPIWrapper": from langchain_community.utilities import GoogleSearchAPIWrapper _warn_on_import( name, replacement="langchain_community.utilities.GoogleSearchAPIWrapper" ) return GoogleSearchAPIWrapper elif name == "GoogleSerperAPIWrapper": from langchain_community.utilities import GoogleSerperAPIWrapper _warn_on_import( name, 
replacement="langchain_community.utilities.GoogleSerperAPIWrapper" ) return GoogleSerperAPIWrapper elif name == "PowerBIDataset": from langchain_community.utilities import PowerBIDataset _warn_on_import( name, replacement="langchain_community.utilities.PowerBIDataset" ) return PowerBIDataset elif name == "SearxSearchWrapper": from langchain_community.utilities import SearxSearchWrapper _warn_on_import( name, replacement="langchain_community.utilities.SearxSearchWrapper" ) return SearxSearchWrapper elif name == "WikipediaAPIWrapper": from langchain_community.utilities import WikipediaAPIWrapper _warn_on_import( name, replacement="langchain_community.utilities.WikipediaAPIWrapper" ) return WikipediaAPIWrapper elif name == "WolframAlphaAPIWrapper": from langchain_community.utilities import WolframAlphaAPIWrapper _warn_on_import( name, replacement="langchain_community.utilities.WolframAlphaAPIWrapper" ) return WolframAlphaAPIWrapper elif name == "SQLDatabase": from langchain_community.utilities import SQLDatabase _warn_on_import(name, replacement="langchain_community.utilities.SQLDatabase") return SQLDatabase elif name == "FAISS": from langchain_community.vectorstores import FAISS _warn_on_import(name, replacement="langchain_community.vectorstores.FAISS") return FAISS elif name == "ElasticVectorSearch": from langchain_community.vectorstores import ElasticVectorSearch _warn_on_import( name, replacement="langchain_community.vectorstores.ElasticVectorSearch" ) return ElasticVectorSearch # For backwards compatibility elif name == "SerpAPIChain" or name == "SerpAPIWrapper": from langchain_community.utilities import SerpAPIWrapper _warn_on_import( name, replacement="langchain_community.utilities.SerpAPIWrapper" ) return SerpAPIWrapper elif name == "verbose": from langchain.globals import _verbose _warn_on_import( name, replacement=( "langchain.globals.set_verbose() / langchain.globals.get_verbose()" ), ) return _verbose elif name == "debug": from langchain.globals import 
_debug _warn_on_import( name, replacement=( "langchain.globals.set_debug() / langchain.globals.get_debug()" ), ) return _debug elif name == "llm_cache": from langchain.globals import _llm_cache _warn_on_import( name, replacement=( "langchain.globals.set_llm_cache() / langchain.globals.get_llm_cache()" ), ) return _llm_cache else: raise AttributeError(f"Could not find: {name}") __all__ = [ "LLMChain", "LLMCheckerChain", "LLMMathChain", "ArxivAPIWrapper", "GoldenQueryAPIWrapper", "SelfAskWithSearchChain", "SerpAPIWrapper", "SerpAPIChain", "SearxSearchWrapper", "GoogleSearchAPIWrapper", "GoogleSerperAPIWrapper", "WolframAlphaAPIWrapper", "WikipediaAPIWrapper", "Anthropic", "Banana", "CerebriumAI", "Cohere", "ForefrontAI", "GooseAI", "Modal", "OpenAI", "Petals", "PipelineAI", "StochasticAI", "Writer", "BasePromptTemplate", "Prompt", "FewShotPromptTemplate", "PromptTemplate", "ReActChain", "Wikipedia", "HuggingFaceHub", "SagemakerEndpoint", "HuggingFacePipeline", "SQLDatabase", "PowerBIDataset", "FAISS", "MRKLChain", "VectorDBQA", "ElasticVectorSearch", "InMemoryDocstore", "ConversationChain", "VectorDBQAWithSourcesChain", "QAWithSourcesChain", "LlamaCpp", "HuggingFaceTextGenInference", ]
0
lc_public_repos/langchain/libs/langchain
lc_public_repos/langchain/libs/langchain/langchain/serpapi.py
"""For backwards compatibility.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.utilities import SerpAPIWrapper # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optional imports. DEPRECATED_LOOKUP = {"SerpAPIWrapper": "langchain_community.utilities"} _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) def __getattr__(name: str) -> Any: """Look up attributes dynamically.""" return _import_attribute(name) __all__ = [ "SerpAPIWrapper", ]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/graphs/rdf_graph.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checking; resolved lazily at runtime.
    from langchain_community.graphs import RdfGraph

# Attribute -> new home.  Accessing one of these through this module emits a
# deprecation warning and forwards the import to the community package.
DEPRECATED_LOOKUP = {"RdfGraph": "langchain_community.graphs"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the community package."""
    return _import_attribute(name)


__all__ = ["RdfGraph"]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/graphs/hugegraph.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checking; resolved lazily at runtime.
    from langchain_community.graphs import HugeGraph

# Attribute -> new home.  Accessing one of these through this module emits a
# deprecation warning and forwards the import to the community package.
DEPRECATED_LOOKUP = {"HugeGraph": "langchain_community.graphs"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the community package."""
    return _import_attribute(name)


__all__ = ["HugeGraph"]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/graphs/nebula_graph.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checking; resolved lazily at runtime.
    from langchain_community.graphs import NebulaGraph

# Attribute -> new home.  Accessing one of these through this module emits a
# deprecation warning and forwards the import to the community package.
DEPRECATED_LOOKUP = {"NebulaGraph": "langchain_community.graphs"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the community package."""
    return _import_attribute(name)


__all__ = ["NebulaGraph"]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/graphs/kuzu_graph.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checking; resolved lazily at runtime.
    from langchain_community.graphs import KuzuGraph

# Attribute -> new home.  Accessing one of these through this module emits a
# deprecation warning and forwards the import to the community package.
DEPRECATED_LOOKUP = {"KuzuGraph": "langchain_community.graphs"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the community package."""
    return _import_attribute(name)


__all__ = ["KuzuGraph"]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/graphs/graph_document.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checking; resolved lazily at runtime.
    from langchain_community.graphs.graph_document import (
        GraphDocument,
        Node,
        Relationship,
    )

# Attribute -> new home.  Accessing one of these through this module emits a
# deprecation warning and forwards the import to the community package.
DEPRECATED_LOOKUP = {
    name: "langchain_community.graphs.graph_document"
    for name in ("Node", "Relationship", "GraphDocument")
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the community package."""
    return _import_attribute(name)


__all__ = ["Node", "Relationship", "GraphDocument"]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/graphs/networkx_graph.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checking; resolved lazily at runtime.
    from langchain_community.graphs import NetworkxEntityGraph
    from langchain_community.graphs.networkx_graph import (
        KnowledgeTriple,
        get_entities,
        parse_triples,
    )

# Attribute -> new home.  Accessing one of these through this module emits a
# deprecation warning and forwards the import to the community package.
DEPRECATED_LOOKUP = {
    name: "langchain_community.graphs.networkx_graph"
    for name in ("KnowledgeTriple", "parse_triples", "get_entities")
}
DEPRECATED_LOOKUP["NetworkxEntityGraph"] = "langchain_community.graphs"

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the community package."""
    return _import_attribute(name)


__all__ = ["KnowledgeTriple", "parse_triples", "get_entities", "NetworkxEntityGraph"]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/graphs/falkordb_graph.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checking; resolved lazily at runtime.
    from langchain_community.graphs import FalkorDBGraph

# Attribute -> new home.  Accessing one of these through this module emits a
# deprecation warning and forwards the import to the community package.
DEPRECATED_LOOKUP = {"FalkorDBGraph": "langchain_community.graphs"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the community package."""
    return _import_attribute(name)


__all__ = ["FalkorDBGraph"]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/graphs/arangodb_graph.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checking; resolved lazily at runtime.
    from langchain_community.graphs import ArangoGraph
    from langchain_community.graphs.arangodb_graph import get_arangodb_client

# Attribute -> new home.  Accessing one of these through this module emits a
# deprecation warning and forwards the import to the community package.
DEPRECATED_LOOKUP = {
    "ArangoGraph": "langchain_community.graphs",
    "get_arangodb_client": "langchain_community.graphs.arangodb_graph",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the community package."""
    return _import_attribute(name)


__all__ = ["ArangoGraph", "get_arangodb_client"]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/graphs/memgraph_graph.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checking; resolved lazily at runtime.
    from langchain_community.graphs import MemgraphGraph

# Attribute -> new home.  Accessing one of these through this module emits a
# deprecation warning and forwards the import to the community package.
DEPRECATED_LOOKUP = {"MemgraphGraph": "langchain_community.graphs"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the community package."""
    return _import_attribute(name)


__all__ = ["MemgraphGraph"]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/graphs/__init__.py
"""**Graphs** provide a natural language interface to graph databases.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.graphs import ( ArangoGraph, FalkorDBGraph, HugeGraph, KuzuGraph, MemgraphGraph, NebulaGraph, Neo4jGraph, NeptuneGraph, NetworkxEntityGraph, RdfGraph, ) # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optional imports. DEPRECATED_LOOKUP = { "MemgraphGraph": "langchain_community.graphs", "NetworkxEntityGraph": "langchain_community.graphs", "Neo4jGraph": "langchain_community.graphs", "NebulaGraph": "langchain_community.graphs", "NeptuneGraph": "langchain_community.graphs", "KuzuGraph": "langchain_community.graphs", "HugeGraph": "langchain_community.graphs", "RdfGraph": "langchain_community.graphs", "ArangoGraph": "langchain_community.graphs", "FalkorDBGraph": "langchain_community.graphs", } _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) def __getattr__(name: str) -> Any: """Look up attributes dynamically.""" return _import_attribute(name) __all__ = [ "MemgraphGraph", "NetworkxEntityGraph", "Neo4jGraph", "NebulaGraph", "NeptuneGraph", "KuzuGraph", "HugeGraph", "RdfGraph", "ArangoGraph", "FalkorDBGraph", ]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/graphs/neptune_graph.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checking; resolved lazily at runtime.
    from langchain_community.graphs import NeptuneGraph

# Attribute -> new home.  Accessing one of these through this module emits a
# deprecation warning and forwards the import to the community package.
DEPRECATED_LOOKUP = {"NeptuneGraph": "langchain_community.graphs"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the community package."""
    return _import_attribute(name)


__all__ = ["NeptuneGraph"]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/graphs/neo4j_graph.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checking; resolved lazily at runtime.
    from langchain_community.graphs import Neo4jGraph

# Attribute -> new home.  Accessing one of these through this module emits a
# deprecation warning and forwards the import to the community package.
DEPRECATED_LOOKUP = {"Neo4jGraph": "langchain_community.graphs"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the community package."""
    return _import_attribute(name)


__all__ = ["Neo4jGraph"]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/graphs/graph_store.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checking; resolved lazily at runtime.
    from langchain_community.graphs.graph_store import GraphStore

# Attribute -> new home.  Accessing one of these through this module emits a
# deprecation warning and forwards the import to the community package.
DEPRECATED_LOOKUP = {"GraphStore": "langchain_community.graphs.graph_store"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the community package."""
    return _import_attribute(name)


__all__ = ["GraphStore"]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_iterator.py
"""Iterator over an AgentExecutor's plan/act loop (sync and async variants)."""

from __future__ import annotations

import asyncio
import logging
import time
from typing import (
    TYPE_CHECKING,
    Any,
    AsyncIterator,
    Dict,
    Iterator,
    List,
    Optional,
    Tuple,
    Union,
)
from uuid import UUID

from langchain_core.agents import (
    AgentAction,
    AgentFinish,
    AgentStep,
)
from langchain_core.callbacks import (
    AsyncCallbackManager,
    AsyncCallbackManagerForChainRun,
    CallbackManager,
    CallbackManagerForChainRun,
    Callbacks,
)
from langchain_core.load.dump import dumpd
from langchain_core.outputs import RunInfo
from langchain_core.runnables.utils import AddableDict
from langchain_core.tools import BaseTool
from langchain_core.utils.input import get_color_mapping

from langchain.schema import RUN_KEY
from langchain.utilities.asyncio import asyncio_timeout

if TYPE_CHECKING:
    # Imported only for type checking to avoid a circular import with
    # langchain.agents.agent, which imports this module.
    from langchain.agents.agent import AgentExecutor, NextStepOutput

logger = logging.getLogger(__name__)


class AgentExecutorIterator:
    """Iterator for AgentExecutor.

    Drives the executor's step loop one iteration at a time, yielding
    ``AddableDict`` chunks (actions, steps, and the final output) so callers
    can stream intermediate agent work instead of waiting for the full run.
    """

    def __init__(
        self,
        agent_executor: AgentExecutor,
        inputs: Any,
        callbacks: Callbacks = None,
        *,
        tags: Optional[list[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        run_name: Optional[str] = None,
        run_id: Optional[UUID] = None,
        include_run_info: bool = False,
        yield_actions: bool = False,
    ):
        """
        Initialize the AgentExecutorIterator with the given AgentExecutor,
        inputs, and optional callbacks.

        Args:
            agent_executor (AgentExecutor): The AgentExecutor to iterate over.
            inputs (Any): The inputs to the AgentExecutor.
            callbacks (Callbacks, optional): The callbacks to
                use during iteration. Defaults to None.
            tags (Optional[list[str]], optional): The tags to use
                during iteration. Defaults to None.
            metadata (Optional[Dict[str, Any]], optional): The metadata to use
                during iteration. Defaults to None.
            run_name (Optional[str], optional): The name of the run.
                Defaults to None.
            run_id (Optional[UUID], optional): The ID of the run.
                Defaults to None.
            include_run_info (bool, optional): Whether to include run info
                in the output. Defaults to False.
            yield_actions (bool, optional): Whether to yield actions as they
                are generated. Defaults to False.
        """
        self._agent_executor = agent_executor
        # Note: setting self.inputs goes through the property setter, which
        # runs agent_executor.prep_inputs on the raw value.
        self.inputs = inputs
        self.callbacks = callbacks
        self.tags = tags
        self.metadata = metadata
        self.run_name = run_name
        self.run_id = run_id
        self.include_run_info = include_run_info
        self.yield_actions = yield_actions
        self.reset()

    # Instance attribute type declarations (values are assigned in __init__).
    _inputs: Dict[str, str]
    callbacks: Callbacks
    tags: Optional[list[str]]
    metadata: Optional[Dict[str, Any]]
    run_name: Optional[str]
    run_id: Optional[UUID]
    include_run_info: bool
    yield_actions: bool

    @property
    def inputs(self) -> Dict[str, str]:
        """The inputs to the AgentExecutor."""
        return self._inputs

    @inputs.setter
    def inputs(self, inputs: Any) -> None:
        # Normalize raw inputs through the executor's own preparation step.
        self._inputs = self.agent_executor.prep_inputs(inputs)

    @property
    def agent_executor(self) -> AgentExecutor:
        """The AgentExecutor to iterate over."""
        return self._agent_executor

    @agent_executor.setter
    def agent_executor(self, agent_executor: AgentExecutor) -> None:
        self._agent_executor = agent_executor
        # force re-prep inputs in case agent_executor's prep_inputs fn changed
        self.inputs = self.inputs

    @property
    def name_to_tool_map(self) -> Dict[str, BaseTool]:
        """A mapping of tool names to tools."""
        return {tool.name: tool for tool in self.agent_executor.tools}

    @property
    def color_mapping(self) -> Dict[str, str]:
        """A mapping of tool names to colors (used for logging output)."""
        return get_color_mapping(
            [tool.name for tool in self.agent_executor.tools],
            excluded_colors=["green", "red"],
        )

    def reset(self) -> None:
        """
        Reset the iterator to its initial state, clearing intermediate steps,
        iterations, and time elapsed.
        """
        logger.debug("(Re)setting AgentExecutorIterator to fresh state")
        self.intermediate_steps: list[tuple[AgentAction, str]] = []
        self.iterations = 0
        # maybe better to start these on the first __anext__ call?
        self.time_elapsed = 0.0
        self.start_time = time.time()

    def update_iterations(self) -> None:
        """
        Increment the number of iterations and update the time elapsed.
        """
        self.iterations += 1
        self.time_elapsed = time.time() - self.start_time
        logger.debug(
            f"Agent Iterations: {self.iterations} ({self.time_elapsed:.2f}s elapsed)"
        )

    def make_final_outputs(
        self,
        outputs: Dict[str, Any],
        run_manager: Union[CallbackManagerForChainRun, AsyncCallbackManagerForChainRun],
    ) -> AddableDict:
        """Prepare the executor's outputs for yielding as the final chunk.

        Optionally attaches ``RunInfo`` under ``RUN_KEY`` when
        ``include_run_info`` is set.
        """
        # have access to intermediate steps by design in iterator,
        # so return only outputs may as well always be true.
        prepared_outputs = AddableDict(
            self.agent_executor.prep_outputs(
                self.inputs, outputs, return_only_outputs=True
            )
        )
        if self.include_run_info:
            prepared_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
        return prepared_outputs

    def __iter__(self: "AgentExecutorIterator") -> Iterator[AddableDict]:
        """Run the agent loop synchronously, yielding chunks as they occur.

        Yields action/step chunks (when ``yield_actions``) followed by the
        final output chunk; on error, reports to the run manager and re-raises.
        """
        logger.debug("Initialising AgentExecutorIterator")
        self.reset()
        callback_manager = CallbackManager.configure(
            self.callbacks,
            self.agent_executor.callbacks,
            self.agent_executor.verbose,
            self.tags,
            self.agent_executor.tags,
            self.metadata,
            self.agent_executor.metadata,
        )
        run_manager = callback_manager.on_chain_start(
            dumpd(self.agent_executor),
            self.inputs,
            self.run_id,
            name=self.run_name,
        )
        try:
            while self.agent_executor._should_continue(
                self.iterations, self.time_elapsed
            ):
                # take the next step: this plans next action, executes it,
                # yielding action and observation as they are generated
                next_step_seq: NextStepOutput = []
                for chunk in self.agent_executor._iter_next_step(
                    self.name_to_tool_map,
                    self.color_mapping,
                    self.inputs,
                    self.intermediate_steps,
                    run_manager,
                ):
                    next_step_seq.append(chunk)
                    # if we're yielding actions, yield them as they come
                    # do not yield AgentFinish, which will be handled below
                    if self.yield_actions:
                        if isinstance(chunk, AgentAction):
                            yield AddableDict(actions=[chunk], messages=chunk.messages)
                        elif isinstance(chunk, AgentStep):
                            yield AddableDict(steps=[chunk], messages=chunk.messages)

                # convert iterator output to format handled by _process_next_step_output
                next_step = self.agent_executor._consume_next_step(next_step_seq)
                # update iterations and time elapsed
                self.update_iterations()
                # decide if this is the final output
                output = self._process_next_step_output(next_step, run_manager)
                is_final = "intermediate_step" not in output
                # yield the final output always
                # for backwards compat, yield int. output if not yielding actions
                if not self.yield_actions or is_final:
                    yield output
                # if final output reached, stop iteration
                if is_final:
                    return
        except BaseException as e:
            run_manager.on_chain_error(e)
            raise

        # if we got here means we exhausted iterations or time
        yield self._stop(run_manager)

    async def __aiter__(self) -> AsyncIterator[AddableDict]:
        """
        N.B. __aiter__ must be a normal method, so need to initialize async
        run manager on first __anext__ call where we can await it
        """
        logger.debug("Initialising AgentExecutorIterator (async)")
        self.reset()
        callback_manager = AsyncCallbackManager.configure(
            self.callbacks,
            self.agent_executor.callbacks,
            self.agent_executor.verbose,
            self.tags,
            self.agent_executor.tags,
            self.metadata,
            self.agent_executor.metadata,
        )
        run_manager = await callback_manager.on_chain_start(
            dumpd(self.agent_executor),
            self.inputs,
            self.run_id,
            name=self.run_name,
        )
        try:
            # Enforce the executor's wall-clock limit for the whole loop.
            async with asyncio_timeout(self.agent_executor.max_execution_time):
                while self.agent_executor._should_continue(
                    self.iterations, self.time_elapsed
                ):
                    # take the next step: this plans next action, executes it,
                    # yielding action and observation as they are generated
                    next_step_seq: NextStepOutput = []
                    async for chunk in self.agent_executor._aiter_next_step(
                        self.name_to_tool_map,
                        self.color_mapping,
                        self.inputs,
                        self.intermediate_steps,
                        run_manager,
                    ):
                        next_step_seq.append(chunk)
                        # if we're yielding actions, yield them as they come
                        # do not yield AgentFinish, which will be handled below
                        if self.yield_actions:
                            if isinstance(chunk, AgentAction):
                                yield AddableDict(
                                    actions=[chunk], messages=chunk.messages
                                )
                            elif isinstance(chunk, AgentStep):
                                yield AddableDict(
                                    steps=[chunk], messages=chunk.messages
                                )

                    # convert iterator output to format handled by _process_next_step
                    next_step = self.agent_executor._consume_next_step(next_step_seq)
                    # update iterations and time elapsed
                    self.update_iterations()
                    # decide if this is the final output
                    output = await self._aprocess_next_step_output(
                        next_step, run_manager
                    )
                    is_final = "intermediate_step" not in output
                    # yield the final output always
                    # for backwards compat, yield int. output if not yielding actions
                    if not self.yield_actions or is_final:
                        yield output
                    # if final output reached, stop iteration
                    if is_final:
                        return
        except (TimeoutError, asyncio.TimeoutError):
            # Timed out: emit the early-stopping response instead of raising.
            yield await self._astop(run_manager)
            return
        except BaseException as e:
            await run_manager.on_chain_error(e)
            raise

        # if we got here means we exhausted iterations or time
        yield await self._astop(run_manager)

    def _process_next_step_output(
        self,
        next_step_output: Union[AgentFinish, List[Tuple[AgentAction, str]]],
        run_manager: CallbackManagerForChainRun,
    ) -> AddableDict:
        """
        Process the output of the next step,
        handling AgentFinish and tool return cases.
        """
        logger.debug("Processing output of Agent loop step")
        if isinstance(next_step_output, AgentFinish):
            logger.debug(
                "Hit AgentFinish: _return -> on_chain_end -> run final output logic"
            )
            return self._return(next_step_output, run_manager=run_manager)

        self.intermediate_steps.extend(next_step_output)
        logger.debug("Updated intermediate_steps with step output")

        # Check for tool return
        if len(next_step_output) == 1:
            next_step_action = next_step_output[0]
            tool_return = self.agent_executor._get_tool_return(next_step_action)
            if tool_return is not None:
                return self._return(tool_return, run_manager=run_manager)

        return AddableDict(intermediate_step=next_step_output)

    async def _aprocess_next_step_output(
        self,
        next_step_output: Union[AgentFinish, List[Tuple[AgentAction, str]]],
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> AddableDict:
        """
        Process the output of the next async step,
        handling AgentFinish and tool return cases.
        """
        logger.debug("Processing output of async Agent loop step")
        if isinstance(next_step_output, AgentFinish):
            logger.debug(
                "Hit AgentFinish: _areturn -> on_chain_end -> run final output logic"
            )
            return await self._areturn(next_step_output, run_manager=run_manager)

        self.intermediate_steps.extend(next_step_output)
        logger.debug("Updated intermediate_steps with step output")

        # Check for tool return
        if len(next_step_output) == 1:
            next_step_action = next_step_output[0]
            tool_return = self.agent_executor._get_tool_return(next_step_action)
            if tool_return is not None:
                return await self._areturn(tool_return, run_manager=run_manager)

        return AddableDict(intermediate_step=next_step_output)

    def _stop(self, run_manager: CallbackManagerForChainRun) -> AddableDict:
        """
        Build and return the early-stopping response after the iteration or
        time limit was hit (returns the final output chunk; it does not raise).
        """
        logger.warning("Stopping agent prematurely due to triggering stop condition")
        # this manually constructs agent finish with output key
        output = self.agent_executor._action_agent.return_stopped_response(
            self.agent_executor.early_stopping_method,
            self.intermediate_steps,
            **self.inputs,
        )
        return self._return(output, run_manager=run_manager)

    async def _astop(self, run_manager: AsyncCallbackManagerForChainRun) -> AddableDict:
        """
        Build and return the early-stopping response for the async loop after
        the iteration or time limit was hit (returns the final output chunk;
        it does not raise).
        """
        logger.warning("Stopping agent prematurely due to triggering stop condition")
        output = self.agent_executor._action_agent.return_stopped_response(
            self.agent_executor.early_stopping_method,
            self.intermediate_steps,
            **self.inputs,
        )
        return await self._areturn(output, run_manager=run_manager)

    def _return(
        self, output: AgentFinish, run_manager: CallbackManagerForChainRun
    ) -> AddableDict:
        """
        Return the final output of the iterator.

        Runs the executor's return logic, attaches the finish messages,
        fires ``on_chain_end``, and packages the result via
        ``make_final_outputs``.
        """
        returned_output = self.agent_executor._return(
            output, self.intermediate_steps, run_manager=run_manager
        )
        returned_output["messages"] = output.messages
        run_manager.on_chain_end(returned_output)
        return self.make_final_outputs(returned_output, run_manager)

    async def _areturn(
        self, output: AgentFinish, run_manager: AsyncCallbackManagerForChainRun
    ) -> AddableDict:
        """
        Return the final output of the async iterator.

        Async counterpart of ``_return``: runs the executor's async return
        logic, attaches the finish messages, awaits ``on_chain_end``, and
        packages the result via ``make_final_outputs``.
        """
        returned_output = await self.agent_executor._areturn(
            output, self.intermediate_steps, run_manager=run_manager
        )
        returned_output["messages"] = output.messages
        await run_manager.on_chain_end(returned_output)
        return self.make_final_outputs(returned_output, run_manager)
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/agents/initialize.py
"""Load agent.""" from typing import Any, Optional, Sequence from langchain_core._api import deprecated from langchain_core.callbacks import BaseCallbackManager from langchain_core.language_models import BaseLanguageModel from langchain_core.tools import BaseTool from langchain._api.deprecation import AGENT_DEPRECATION_WARNING from langchain.agents.agent import AgentExecutor from langchain.agents.agent_types import AgentType from langchain.agents.loading import AGENT_TO_CLASS, load_agent @deprecated( "0.1.0", message=AGENT_DEPRECATION_WARNING, removal="1.0", ) def initialize_agent( tools: Sequence[BaseTool], llm: BaseLanguageModel, agent: Optional[AgentType] = None, callback_manager: Optional[BaseCallbackManager] = None, agent_path: Optional[str] = None, agent_kwargs: Optional[dict] = None, *, tags: Optional[Sequence[str]] = None, **kwargs: Any, ) -> AgentExecutor: """Load an agent executor given tools and LLM. Args: tools: List of tools this agent has access to. llm: Language model to use as the agent. agent: Agent type to use. If None and agent_path is also None, will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None. callback_manager: CallbackManager to use. Global callback manager is used if not provided. Defaults to None. agent_path: Path to serialized agent to use. If None and agent is also None, will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None. agent_kwargs: Additional keyword arguments to pass to the underlying agent. Defaults to None. tags: Tags to apply to the traced runs. Defaults to None. kwargs: Additional keyword arguments passed to the agent executor. Returns: An agent executor. Raises: ValueError: If both `agent` and `agent_path` are specified. ValueError: If `agent` is not a valid agent type. ValueError: If both `agent` and `agent_path` are None. 
""" tags_ = list(tags) if tags else [] if agent is None and agent_path is None: agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION if agent is not None and agent_path is not None: raise ValueError( "Both `agent` and `agent_path` are specified, " "but at most only one should be." ) if agent is not None: if agent not in AGENT_TO_CLASS: raise ValueError( f"Got unknown agent type: {agent}. " f"Valid types are: {AGENT_TO_CLASS.keys()}." ) tags_.append(agent.value if isinstance(agent, AgentType) else agent) agent_cls = AGENT_TO_CLASS[agent] agent_kwargs = agent_kwargs or {} agent_obj = agent_cls.from_llm_and_tools( llm, tools, callback_manager=callback_manager, **agent_kwargs ) elif agent_path is not None: agent_obj = load_agent( agent_path, llm=llm, tools=tools, callback_manager=callback_manager ) try: # TODO: Add tags from the serialized object directly. tags_.append(agent_obj._agent_type) except NotImplementedError: pass else: raise ValueError( "Somehow both `agent` and `agent_path` are None, " "this should never happen." ) return AgentExecutor.from_agent_and_tools( agent=agent_obj, tools=tools, callback_manager=callback_manager, tags=tags_, **kwargs, )
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/agents/types.py
from typing import Dict, Type, Union

from langchain.agents.agent import BaseSingleActionAgent
from langchain.agents.agent_types import AgentType
from langchain.agents.chat.base import ChatAgent
from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.agents.openai_functions_multi_agent.base import OpenAIMultiFunctionsAgent
from langchain.agents.react.base import ReActDocstoreAgent
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchAgent
from langchain.agents.structured_chat.base import StructuredChatAgent

# Type alias for any concrete agent class accepted by the registry below:
# either a single-action agent subclass or the multi-function OpenAI agent.
AGENT_TYPE = Union[Type[BaseSingleActionAgent], Type[OpenAIMultiFunctionsAgent]]

# Registry mapping each AgentType enum member to the class that implements it.
# Used to resolve an agent type string/enum into a constructible agent class.
AGENT_TO_CLASS: Dict[AgentType, AGENT_TYPE] = {
    AgentType.ZERO_SHOT_REACT_DESCRIPTION: ZeroShotAgent,
    AgentType.REACT_DOCSTORE: ReActDocstoreAgent,
    AgentType.SELF_ASK_WITH_SEARCH: SelfAskWithSearchAgent,
    AgentType.CONVERSATIONAL_REACT_DESCRIPTION: ConversationalAgent,
    AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION: ChatAgent,
    AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION: ConversationalChatAgent,
    AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION: StructuredChatAgent,
    AgentType.OPENAI_FUNCTIONS: OpenAIFunctionsAgent,
    AgentType.OPENAI_MULTI_FUNCTIONS: OpenAIMultiFunctionsAgent,
}
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/agents/agent.py
"""Chain that takes in an input and produces an action and action input.""" from __future__ import annotations import asyncio import json import logging import time from abc import abstractmethod from pathlib import Path from typing import ( Any, AsyncIterator, Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union, cast, ) import yaml from langchain_core._api import deprecated from langchain_core.agents import AgentAction, AgentFinish, AgentStep from langchain_core.callbacks import ( AsyncCallbackManagerForChainRun, AsyncCallbackManagerForToolRun, BaseCallbackManager, CallbackManagerForChainRun, CallbackManagerForToolRun, Callbacks, ) from langchain_core.exceptions import OutputParserException from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage from langchain_core.output_parsers import BaseOutputParser from langchain_core.prompts import BasePromptTemplate from langchain_core.prompts.few_shot import FewShotPromptTemplate from langchain_core.prompts.prompt import PromptTemplate from langchain_core.runnables import Runnable, RunnableConfig, ensure_config from langchain_core.runnables.utils import AddableDict from langchain_core.tools import BaseTool from langchain_core.utils.input import get_color_mapping from pydantic import BaseModel, ConfigDict, model_validator from typing_extensions import Self from langchain._api.deprecation import AGENT_DEPRECATION_WARNING from langchain.agents.agent_iterator import AgentExecutorIterator from langchain.agents.agent_types import AgentType from langchain.agents.tools import InvalidTool from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.utilities.asyncio import asyncio_timeout logger = logging.getLogger(__name__) class BaseSingleActionAgent(BaseModel): """Base Single Action Agent class.""" @property def return_values(self) -> List[str]: """Return values of the agent.""" return ["output"] def get_allowed_tools(self) -> 
Optional[List[str]]: return None @abstractmethod def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ @abstractmethod async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Async given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ @property @abstractmethod def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ def return_stopped_response( self, early_stopping_method: str, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any, ) -> AgentFinish: """Return response when agent has been stopped due to max iterations. Args: early_stopping_method: Method to use for early stopping. intermediate_steps: Steps the LLM has taken to date, along with observations. **kwargs: User inputs. Returns: AgentFinish: Agent finish object. Raises: ValueError: If `early_stopping_method` is not supported. """ if early_stopping_method == "force": # `force` just returns a constant string return AgentFinish( {"output": "Agent stopped due to iteration limit or time limit."}, "" ) else: raise ValueError( f"Got unsupported early_stopping_method `{early_stopping_method}`" ) @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, **kwargs: Any, ) -> BaseSingleActionAgent: """Construct an agent from an LLM and tools. Args: llm: Language model to use. tools: Tools to use. 
callback_manager: Callback manager to use. kwargs: Additional arguments. Returns: BaseSingleActionAgent: Agent object. """ raise NotImplementedError @property def _agent_type(self) -> str: """Return Identifier of an agent type.""" raise NotImplementedError def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of agent. Returns: Dict: Dictionary representation of agent. """ _dict = super().model_dump() try: _type = self._agent_type except NotImplementedError: _type = None if isinstance(_type, AgentType): _dict["_type"] = str(_type.value) elif _type is not None: _dict["_type"] = _type return _dict def save(self, file_path: Union[Path, str]) -> None: """Save the agent. Args: file_path: Path to file to save the agent to. Example: .. code-block:: python # If working with agent executor agent.agent.save(file_path="path/agent.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save agent_dict = self.dict() if "_type" not in agent_dict: raise NotImplementedError(f"Agent {self} does not support saving") if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(agent_dict, f, indent=4) elif save_path.suffix.endswith((".yaml", ".yml")): with open(file_path, "w") as f: yaml.dump(agent_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") def tool_run_logging_kwargs(self) -> Dict: """Return logging kwargs for tool run.""" return {} class BaseMultiActionAgent(BaseModel): """Base Multi Action Agent class.""" @property def return_values(self) -> List[str]: """Return values of the agent.""" return ["output"] def get_allowed_tools(self) -> Optional[List[str]]: """Get allowed tools. Returns: Optional[List[str]]: Allowed tools. 
""" return None @abstractmethod def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[List[AgentAction], AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with the observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Actions specifying what tool to use. """ @abstractmethod async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[List[AgentAction], AgentFinish]: """Async given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with the observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Actions specifying what tool to use. """ @property @abstractmethod def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ def return_stopped_response( self, early_stopping_method: str, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any, ) -> AgentFinish: """Return response when agent has been stopped due to max iterations. Args: early_stopping_method: Method to use for early stopping. intermediate_steps: Steps the LLM has taken to date, along with observations. **kwargs: User inputs. Returns: AgentFinish: Agent finish object. Raises: ValueError: If `early_stopping_method` is not supported. 
""" if early_stopping_method == "force": # `force` just returns a constant string return AgentFinish({"output": "Agent stopped due to max iterations."}, "") else: raise ValueError( f"Got unsupported early_stopping_method `{early_stopping_method}`" ) @property def _agent_type(self) -> str: """Return Identifier of an agent type.""" raise NotImplementedError def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of agent.""" _dict = super().model_dump() try: _dict["_type"] = str(self._agent_type) except NotImplementedError: pass return _dict def save(self, file_path: Union[Path, str]) -> None: """Save the agent. Args: file_path: Path to file to save the agent to. Raises: NotImplementedError: If agent does not support saving. ValueError: If file_path is not json or yaml. Example: .. code-block:: python # If working with agent executor agent.agent.save(file_path="path/agent.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path # Fetch dictionary to save agent_dict = self.dict() if "_type" not in agent_dict: raise NotImplementedError(f"Agent {self} does not support saving.") directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(agent_dict, f, indent=4) elif save_path.suffix.endswith((".yaml", ".yml")): with open(file_path, "w") as f: yaml.dump(agent_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") def tool_run_logging_kwargs(self) -> Dict: """Return logging kwargs for tool run.""" return {} class AgentOutputParser(BaseOutputParser[Union[AgentAction, AgentFinish]]): """Base class for parsing agent output into agent action/finish.""" @abstractmethod def parse(self, text: str) -> Union[AgentAction, AgentFinish]: """Parse text into agent action/finish.""" class MultiActionAgentOutputParser( BaseOutputParser[Union[List[AgentAction], 
AgentFinish]] ): """Base class for parsing agent output into agent actions/finish. This is used for agents that can return multiple actions. """ @abstractmethod def parse(self, text: str) -> Union[List[AgentAction], AgentFinish]: """Parse text into agent actions/finish. Args: text: Text to parse. Returns: Union[List[AgentAction], AgentFinish]: List of agent actions or agent finish. """ class RunnableAgent(BaseSingleActionAgent): """Agent powered by Runnables.""" runnable: Runnable[dict, Union[AgentAction, AgentFinish]] """Runnable to call to get agent action.""" input_keys_arg: List[str] = [] return_keys_arg: List[str] = [] stream_runnable: bool = True """Whether to stream from the runnable or not. If True then underlying LLM is invoked in a streaming fashion to make it possible to get access to the individual LLM tokens when using stream_log with the Agent Executor. If False then LLM is invoked in a non-streaming fashion and individual LLM tokens will not be available in stream_log. """ model_config = ConfigDict( arbitrary_types_allowed=True, ) @property def return_values(self) -> List[str]: """Return values of the agent.""" return self.return_keys_arg @property def input_keys(self) -> List[str]: """Return the input keys.""" return self.input_keys_arg def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Based on past history and current inputs, decide what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with the observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}} final_output: Any = None if self.stream_runnable: # Use streaming to make sure that the underlying LLM is invoked in a # streaming # fashion to make it possible to get access to the individual LLM tokens # when using stream_log with the Agent Executor. 
# Because the response from the plan is not a generator, we need to # accumulate the output into final output and return that. for chunk in self.runnable.stream(inputs, config={"callbacks": callbacks}): if final_output is None: final_output = chunk else: final_output += chunk else: final_output = self.runnable.invoke(inputs, config={"callbacks": callbacks}) return final_output async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[ AgentAction, AgentFinish, ]: """Async based on past history and current inputs, decide what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}} final_output: Any = None if self.stream_runnable: # Use streaming to make sure that the underlying LLM is invoked in a # streaming # fashion to make it possible to get access to the individual LLM tokens # when using stream_log with the Agent Executor. # Because the response from the plan is not a generator, we need to # accumulate the output into final output and return that. async for chunk in self.runnable.astream( inputs, config={"callbacks": callbacks} ): if final_output is None: final_output = chunk else: final_output += chunk else: final_output = await self.runnable.ainvoke( inputs, config={"callbacks": callbacks} ) return final_output class RunnableMultiActionAgent(BaseMultiActionAgent): """Agent powered by Runnables.""" runnable: Runnable[dict, Union[List[AgentAction], AgentFinish]] """Runnable to call to get agent actions.""" input_keys_arg: List[str] = [] return_keys_arg: List[str] = [] stream_runnable: bool = True """Whether to stream from the runnable or not. 
If True then underlying LLM is invoked in a streaming fashion to make it possible to get access to the individual LLM tokens when using stream_log with the Agent Executor. If False then LLM is invoked in a non-streaming fashion and individual LLM tokens will not be available in stream_log. """ model_config = ConfigDict( arbitrary_types_allowed=True, ) @property def return_values(self) -> List[str]: """Return values of the agent.""" return self.return_keys_arg @property def input_keys(self) -> List[str]: """Return the input keys. Returns: List of input keys. """ return self.input_keys_arg def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[ List[AgentAction], AgentFinish, ]: """Based on past history and current inputs, decide what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with the observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}} final_output: Any = None if self.stream_runnable: # Use streaming to make sure that the underlying LLM is invoked in a # streaming # fashion to make it possible to get access to the individual LLM tokens # when using stream_log with the Agent Executor. # Because the response from the plan is not a generator, we need to # accumulate the output into final output and return that. for chunk in self.runnable.stream(inputs, config={"callbacks": callbacks}): if final_output is None: final_output = chunk else: final_output += chunk else: final_output = self.runnable.invoke(inputs, config={"callbacks": callbacks}) return final_output async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[ List[AgentAction], AgentFinish, ]: """Async based on past history and current inputs, decide what to do. 
Args: intermediate_steps: Steps the LLM has taken to date, along with observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}} final_output: Any = None if self.stream_runnable: # Use streaming to make sure that the underlying LLM is invoked in a # streaming # fashion to make it possible to get access to the individual LLM tokens # when using stream_log with the Agent Executor. # Because the response from the plan is not a generator, we need to # accumulate the output into final output and return that. async for chunk in self.runnable.astream( inputs, config={"callbacks": callbacks} ): if final_output is None: final_output = chunk else: final_output += chunk else: final_output = await self.runnable.ainvoke( inputs, config={"callbacks": callbacks} ) return final_output @deprecated( "0.1.0", message=AGENT_DEPRECATION_WARNING, removal="1.0", ) class LLMSingleActionAgent(BaseSingleActionAgent): """Base class for single action agents.""" llm_chain: LLMChain """LLMChain to use for agent.""" output_parser: AgentOutputParser """Output parser to use for agent.""" stop: List[str] """List of strings to stop on.""" @property def input_keys(self) -> List[str]: """Return the input keys. Returns: List of input keys. """ return list(set(self.llm_chain.input_keys) - {"intermediate_steps"}) def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of agent.""" _dict = super().dict() del _dict["output_parser"] return _dict def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with the observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. 
""" output = self.llm_chain.run( intermediate_steps=intermediate_steps, stop=self.stop, callbacks=callbacks, **kwargs, ) return self.output_parser.parse(output) async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Async given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ output = await self.llm_chain.arun( intermediate_steps=intermediate_steps, stop=self.stop, callbacks=callbacks, **kwargs, ) return self.output_parser.parse(output) def tool_run_logging_kwargs(self) -> Dict: """Return logging kwargs for tool run.""" return { "llm_prefix": "", "observation_prefix": "" if len(self.stop) == 0 else self.stop[0], } @deprecated( "0.1.0", message=AGENT_DEPRECATION_WARNING, removal="1.0", ) class Agent(BaseSingleActionAgent): """Agent that calls the language model and deciding the action. This is driven by a LLMChain. The prompt in the LLMChain MUST include a variable called "agent_scratchpad" where the agent can put its intermediary work. """ llm_chain: LLMChain """LLMChain to use for agent.""" output_parser: AgentOutputParser """Output parser to use for agent.""" allowed_tools: Optional[List[str]] = None """Allowed tools for the agent. If None, all tools are allowed.""" def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of agent.""" _dict = super().dict() del _dict["output_parser"] return _dict def get_allowed_tools(self) -> Optional[List[str]]: """Get allowed tools.""" return self.allowed_tools @property def return_values(self) -> List[str]: """Return values of the agent.""" return ["output"] def _fix_text(self, text: str) -> str: """Fix the text. Args: text: Text to fix. Returns: str: Fixed text. 
""" raise ValueError("fix_text not implemented for this agent.") @property def _stop(self) -> List[str]: return [ f"\n{self.observation_prefix.rstrip()}", f"\n\t{self.observation_prefix.rstrip()}", ] def _construct_scratchpad( self, intermediate_steps: List[Tuple[AgentAction, str]] ) -> Union[str, List[BaseMessage]]: """Construct the scratchpad that lets the agent continue its thought process.""" thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}" return thoughts def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ full_inputs = self.get_full_inputs(intermediate_steps, **kwargs) full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs) return self.output_parser.parse(full_output) async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Async given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ full_inputs = self.get_full_inputs(intermediate_steps, **kwargs) full_output = await self.llm_chain.apredict(callbacks=callbacks, **full_inputs) agent_output = await self.output_parser.aparse(full_output) return agent_output def get_full_inputs( self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any ) -> Dict[str, Any]: """Create the full inputs for the LLMChain from intermediate steps. 
Args: intermediate_steps: Steps the LLM has taken to date, along with observations. **kwargs: User inputs. Returns: Dict[str, Any]: Full inputs for the LLMChain. """ thoughts = self._construct_scratchpad(intermediate_steps) new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop} full_inputs = {**kwargs, **new_inputs} return full_inputs @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return list(set(self.llm_chain.input_keys) - {"agent_scratchpad"}) @model_validator(mode="after") def validate_prompt(self) -> Self: """Validate that prompt matches format. Args: values: Values to validate. Returns: Dict: Validated values. Raises: ValueError: If `agent_scratchpad` is not in prompt.input_variables and prompt is not a FewShotPromptTemplate or a PromptTemplate. """ prompt = self.llm_chain.prompt if "agent_scratchpad" not in prompt.input_variables: logger.warning( "`agent_scratchpad` should be a variable in prompt.input_variables." " Did not find it, so adding it at the end." ) prompt.input_variables.append("agent_scratchpad") if isinstance(prompt, PromptTemplate): prompt.template += "\n{agent_scratchpad}" elif isinstance(prompt, FewShotPromptTemplate): prompt.suffix += "\n{agent_scratchpad}" else: raise ValueError(f"Got unexpected prompt type {type(prompt)}") return self @property @abstractmethod def observation_prefix(self) -> str: """Prefix to append the observation with.""" @property @abstractmethod def llm_prefix(self) -> str: """Prefix to append the LLM call with.""" @classmethod @abstractmethod def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: """Create a prompt for this class. Args: tools: Tools to use. Returns: BasePromptTemplate: Prompt template. """ @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: """Validate that appropriate tools are passed in. Args: tools: Tools to use. 
""" pass @classmethod @abstractmethod def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: """Get default output parser for this class.""" @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools. Args: llm: Language model to use. tools: Tools to use. callback_manager: Callback manager to use. output_parser: Output parser to use. kwargs: Additional arguments. Returns: Agent: Agent object. """ cls._validate_tools(tools) llm_chain = LLMChain( llm=llm, prompt=cls.create_prompt(tools), callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] _output_parser = output_parser or cls._get_default_output_parser() return cls( llm_chain=llm_chain, allowed_tools=tool_names, output_parser=_output_parser, **kwargs, ) def return_stopped_response( self, early_stopping_method: str, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any, ) -> AgentFinish: """Return response when agent has been stopped due to max iterations. Args: early_stopping_method: Method to use for early stopping. intermediate_steps: Steps the LLM has taken to date, along with observations. **kwargs: User inputs. Returns: AgentFinish: Agent finish object. Raises: ValueError: If `early_stopping_method` is not in ['force', 'generate']. 
""" if early_stopping_method == "force": # `force` just returns a constant string return AgentFinish( {"output": "Agent stopped due to iteration limit or time limit."}, "" ) elif early_stopping_method == "generate": # Generate does one final forward pass thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += ( f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}" ) # Adding to the previous steps, we now tell the LLM to make a final pred thoughts += ( "\n\nI now need to return a final answer based on the previous steps:" ) new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop} full_inputs = {**kwargs, **new_inputs} full_output = self.llm_chain.predict(**full_inputs) # We try to extract a final answer parsed_output = self.output_parser.parse(full_output) if isinstance(parsed_output, AgentFinish): # If we can extract, we send the correct stuff return parsed_output else: # If we can extract, but the tool is not the final tool, # we just return the full output return AgentFinish({"output": full_output}, full_output) else: raise ValueError( "early_stopping_method should be one of `force` or `generate`, " f"got {early_stopping_method}" ) def tool_run_logging_kwargs(self) -> Dict: """Return logging kwargs for tool run.""" return { "llm_prefix": self.llm_prefix, "observation_prefix": self.observation_prefix, } class ExceptionTool(BaseTool): # type: ignore[override] """Tool that just returns the query.""" name: str = "_Exception" """Name of the tool.""" description: str = "Exception tool" """Description of the tool.""" def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: return query async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: return query NextStepOutput = List[Union[AgentFinish, AgentAction, AgentStep]] RunnableAgentType = Union[RunnableAgent, RunnableMultiActionAgent] class AgentExecutor(Chain): """Agent that is 
using tools.""" agent: Union[BaseSingleActionAgent, BaseMultiActionAgent, Runnable] """The agent to run for creating a plan and determining actions to take at each step of the execution loop.""" tools: Sequence[BaseTool] """The valid tools the agent can call.""" return_intermediate_steps: bool = False """Whether to return the agent's trajectory of intermediate steps at the end in addition to the final output.""" max_iterations: Optional[int] = 15 """The maximum number of steps to take before ending the execution loop. Setting to 'None' could lead to an infinite loop.""" max_execution_time: Optional[float] = None """The maximum amount of wall clock time to spend in the execution loop. """ early_stopping_method: str = "force" """The method to use for early stopping if the agent never returns `AgentFinish`. Either 'force' or 'generate'. `"force"` returns a string saying that it stopped because it met a time or iteration limit. `"generate"` calls the agent's LLM Chain one final time to generate a final answer based on the previous steps. """ handle_parsing_errors: Union[bool, str, Callable[[OutputParserException], str]] = ( False ) """How to handle errors raised by the agent's output parser. Defaults to `False`, which raises the error. If `true`, the error will be sent back to the LLM as an observation. If a string, the string itself will be sent to the LLM as an observation. If a callable function, the function will be called with the exception as an argument, and the result of that function will be passed to the agent as an observation. """ trim_intermediate_steps: Union[ int, Callable[[List[Tuple[AgentAction, str]]], List[Tuple[AgentAction, str]]] ] = -1 """How to trim the intermediate steps before returning them. Defaults to -1, which means no trimming. 
""" @classmethod def from_agent_and_tools( cls, agent: Union[BaseSingleActionAgent, BaseMultiActionAgent, Runnable], tools: Sequence[BaseTool], callbacks: Callbacks = None, **kwargs: Any, ) -> AgentExecutor: """Create from agent and tools. Args: agent: Agent to use. tools: Tools to use. callbacks: Callbacks to use. kwargs: Additional arguments. Returns: AgentExecutor: Agent executor object. """ return cls( agent=agent, tools=tools, callbacks=callbacks, **kwargs, ) @model_validator(mode="after") def validate_tools(self) -> Self: """Validate that tools are compatible with agent. Args: values: Values to validate. Returns: Dict: Validated values. Raises: ValueError: If allowed tools are different than provided tools. """ agent = self.agent tools = self.tools allowed_tools = agent.get_allowed_tools() # type: ignore if allowed_tools is not None: if set(allowed_tools) != set([tool.name for tool in tools]): raise ValueError( f"Allowed tools ({allowed_tools}) different than " f"provided tools ({[tool.name for tool in tools]})" ) return self @model_validator(mode="before") @classmethod def validate_runnable_agent(cls, values: Dict) -> Any: """Convert runnable to agent if passed in. Args: values: Values to validate. Returns: Dict: Validated values. """ agent = values.get("agent") if agent and isinstance(agent, Runnable): try: output_type = agent.OutputType except Exception as _: multi_action = False else: multi_action = output_type == Union[List[AgentAction], AgentFinish] stream_runnable = values.pop("stream_runnable", True) if multi_action: values["agent"] = RunnableMultiActionAgent( runnable=agent, stream_runnable=stream_runnable ) else: values["agent"] = RunnableAgent( runnable=agent, stream_runnable=stream_runnable ) return values @property def _action_agent(self) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: """Type cast self.agent. 
If the `agent` attribute is a Runnable, it will be converted one of RunnableAgentType in the validate_runnable_agent root_validator. To support instantiating with a Runnable, here we explicitly cast the type to reflect the changes made in the root_validator. """ if isinstance(self.agent, Runnable): return cast(RunnableAgentType, self.agent) else: return self.agent def save(self, file_path: Union[Path, str]) -> None: """Raise error - saving not supported for Agent Executors. Args: file_path: Path to save to. Raises: ValueError: Saving not supported for agent executors. """ raise ValueError( "Saving not supported for agent executors. " "If you are trying to save the agent, please use the " "`.save_agent(...)`" ) def save_agent(self, file_path: Union[Path, str]) -> None: """Save the underlying agent. Args: file_path: Path to save to. """ return self._action_agent.save(file_path) def iter( self, inputs: Any, callbacks: Callbacks = None, *, include_run_info: bool = False, async_: bool = False, # arg kept for backwards compat, but ignored ) -> AgentExecutorIterator: """Enables iteration over steps taken to reach final output. Args: inputs: Inputs to the agent. callbacks: Callbacks to run. include_run_info: Whether to include run info. async_: Whether to run async. (Ignored) Returns: AgentExecutorIterator: Agent executor iterator object. """ return AgentExecutorIterator( self, inputs, callbacks, tags=self.tags, include_run_info=include_run_info, ) @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return self._action_agent.input_keys @property def output_keys(self) -> List[str]: """Return the singular output key. :meta private: """ if self.return_intermediate_steps: return self._action_agent.return_values + ["intermediate_steps"] else: return self._action_agent.return_values def lookup_tool(self, name: str) -> BaseTool: """Lookup tool by name. Args: name: Name of tool. Returns: BaseTool: Tool object. 
""" return {tool.name: tool for tool in self.tools}[name] def _should_continue(self, iterations: int, time_elapsed: float) -> bool: if self.max_iterations is not None and iterations >= self.max_iterations: return False if ( self.max_execution_time is not None and time_elapsed >= self.max_execution_time ): return False return True def _return( self, output: AgentFinish, intermediate_steps: list, run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: if run_manager: run_manager.on_agent_finish(output, color="green", verbose=self.verbose) final_output = output.return_values if self.return_intermediate_steps: final_output["intermediate_steps"] = intermediate_steps return final_output async def _areturn( self, output: AgentFinish, intermediate_steps: list, run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: if run_manager: await run_manager.on_agent_finish( output, color="green", verbose=self.verbose ) final_output = output.return_values if self.return_intermediate_steps: final_output["intermediate_steps"] = intermediate_steps return final_output def _consume_next_step( self, values: NextStepOutput ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]: if isinstance(values[-1], AgentFinish): assert len(values) == 1 return values[-1] else: return [ (a.action, a.observation) for a in values if isinstance(a, AgentStep) ] def _take_next_step( self, name_to_tool_map: Dict[str, BaseTool], color_mapping: Dict[str, str], inputs: Dict[str, str], intermediate_steps: List[Tuple[AgentAction, str]], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]: return self._consume_next_step( [ a for a in self._iter_next_step( name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager, ) ] ) def _iter_next_step( self, name_to_tool_map: Dict[str, BaseTool], color_mapping: Dict[str, str], inputs: Dict[str, str], intermediate_steps: List[Tuple[AgentAction, str]], 
run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]: """Take a single step in the thought-action-observation loop. Override this to take control of how the agent makes and acts on choices. """ try: intermediate_steps = self._prepare_intermediate_steps(intermediate_steps) # Call the LLM to see what to do. output = self._action_agent.plan( intermediate_steps, callbacks=run_manager.get_child() if run_manager else None, **inputs, ) except OutputParserException as e: if isinstance(self.handle_parsing_errors, bool): raise_error = not self.handle_parsing_errors else: raise_error = False if raise_error: raise ValueError( "An output parsing error occurred. " "In order to pass this error back to the agent and have it try " "again, pass `handle_parsing_errors=True` to the AgentExecutor. " f"This is the error: {str(e)}" ) text = str(e) if isinstance(self.handle_parsing_errors, bool): if e.send_to_llm: observation = str(e.observation) text = str(e.llm_output) else: observation = "Invalid or incomplete response" elif isinstance(self.handle_parsing_errors, str): observation = self.handle_parsing_errors elif callable(self.handle_parsing_errors): observation = self.handle_parsing_errors(e) else: raise ValueError("Got unexpected type of `handle_parsing_errors`") output = AgentAction("_Exception", observation, text) if run_manager: run_manager.on_agent_action(output, color="green") tool_run_kwargs = self._action_agent.tool_run_logging_kwargs() observation = ExceptionTool().run( output.tool_input, verbose=self.verbose, color=None, callbacks=run_manager.get_child() if run_manager else None, **tool_run_kwargs, ) yield AgentStep(action=output, observation=observation) return # If the tool chosen is the finishing tool, then we end and return. 
if isinstance(output, AgentFinish): yield output return actions: List[AgentAction] if isinstance(output, AgentAction): actions = [output] else: actions = output for agent_action in actions: yield agent_action for agent_action in actions: yield self._perform_agent_action( name_to_tool_map, color_mapping, agent_action, run_manager ) def _perform_agent_action( self, name_to_tool_map: Dict[str, BaseTool], color_mapping: Dict[str, str], agent_action: AgentAction, run_manager: Optional[CallbackManagerForChainRun] = None, ) -> AgentStep: if run_manager: run_manager.on_agent_action(agent_action, color="green") # Otherwise we lookup the tool if agent_action.tool in name_to_tool_map: tool = name_to_tool_map[agent_action.tool] return_direct = tool.return_direct color = color_mapping[agent_action.tool] tool_run_kwargs = self._action_agent.tool_run_logging_kwargs() if return_direct: tool_run_kwargs["llm_prefix"] = "" # We then call the tool on the tool input to get an observation observation = tool.run( agent_action.tool_input, verbose=self.verbose, color=color, callbacks=run_manager.get_child() if run_manager else None, **tool_run_kwargs, ) else: tool_run_kwargs = self._action_agent.tool_run_logging_kwargs() observation = InvalidTool().run( { "requested_tool_name": agent_action.tool, "available_tool_names": list(name_to_tool_map.keys()), }, verbose=self.verbose, color=None, callbacks=run_manager.get_child() if run_manager else None, **tool_run_kwargs, ) return AgentStep(action=agent_action, observation=observation) async def _atake_next_step( self, name_to_tool_map: Dict[str, BaseTool], color_mapping: Dict[str, str], inputs: Dict[str, str], intermediate_steps: List[Tuple[AgentAction, str]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]: return self._consume_next_step( [ a async for a in self._aiter_next_step( name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager, ) ] ) async def 
_aiter_next_step( self, name_to_tool_map: Dict[str, BaseTool], color_mapping: Dict[str, str], inputs: Dict[str, str], intermediate_steps: List[Tuple[AgentAction, str]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> AsyncIterator[Union[AgentFinish, AgentAction, AgentStep]]: """Take a single step in the thought-action-observation loop. Override this to take control of how the agent makes and acts on choices. """ try: intermediate_steps = self._prepare_intermediate_steps(intermediate_steps) # Call the LLM to see what to do. output = await self._action_agent.aplan( intermediate_steps, callbacks=run_manager.get_child() if run_manager else None, **inputs, ) except OutputParserException as e: if isinstance(self.handle_parsing_errors, bool): raise_error = not self.handle_parsing_errors else: raise_error = False if raise_error: raise ValueError( "An output parsing error occurred. " "In order to pass this error back to the agent and have it try " "again, pass `handle_parsing_errors=True` to the AgentExecutor. " f"This is the error: {str(e)}" ) text = str(e) if isinstance(self.handle_parsing_errors, bool): if e.send_to_llm: observation = str(e.observation) text = str(e.llm_output) else: observation = "Invalid or incomplete response" elif isinstance(self.handle_parsing_errors, str): observation = self.handle_parsing_errors elif callable(self.handle_parsing_errors): observation = self.handle_parsing_errors(e) else: raise ValueError("Got unexpected type of `handle_parsing_errors`") output = AgentAction("_Exception", observation, text) tool_run_kwargs = self._action_agent.tool_run_logging_kwargs() observation = await ExceptionTool().arun( output.tool_input, verbose=self.verbose, color=None, callbacks=run_manager.get_child() if run_manager else None, **tool_run_kwargs, ) yield AgentStep(action=output, observation=observation) return # If the tool chosen is the finishing tool, then we end and return. 
if isinstance(output, AgentFinish): yield output return actions: List[AgentAction] if isinstance(output, AgentAction): actions = [output] else: actions = output for agent_action in actions: yield agent_action # Use asyncio.gather to run multiple tool.arun() calls concurrently result = await asyncio.gather( *[ self._aperform_agent_action( name_to_tool_map, color_mapping, agent_action, run_manager ) for agent_action in actions ], ) # TODO This could yield each result as it becomes available for chunk in result: yield chunk async def _aperform_agent_action( self, name_to_tool_map: Dict[str, BaseTool], color_mapping: Dict[str, str], agent_action: AgentAction, run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> AgentStep: if run_manager: await run_manager.on_agent_action( agent_action, verbose=self.verbose, color="green" ) # Otherwise we lookup the tool if agent_action.tool in name_to_tool_map: tool = name_to_tool_map[agent_action.tool] return_direct = tool.return_direct color = color_mapping[agent_action.tool] tool_run_kwargs = self._action_agent.tool_run_logging_kwargs() if return_direct: tool_run_kwargs["llm_prefix"] = "" # We then call the tool on the tool input to get an observation observation = await tool.arun( agent_action.tool_input, verbose=self.verbose, color=color, callbacks=run_manager.get_child() if run_manager else None, **tool_run_kwargs, ) else: tool_run_kwargs = self._action_agent.tool_run_logging_kwargs() observation = await InvalidTool().arun( { "requested_tool_name": agent_action.tool, "available_tool_names": list(name_to_tool_map.keys()), }, verbose=self.verbose, color=None, callbacks=run_manager.get_child() if run_manager else None, **tool_run_kwargs, ) return AgentStep(action=agent_action, observation=observation) def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run text through and get agent response.""" # Construct a mapping of tool name to tool for easy 
lookup name_to_tool_map = {tool.name: tool for tool in self.tools} # We construct a mapping from each tool to a color, used for logging. color_mapping = get_color_mapping( [tool.name for tool in self.tools], excluded_colors=["green", "red"] ) intermediate_steps: List[Tuple[AgentAction, str]] = [] # Let's start tracking the number of iterations and time elapsed iterations = 0 time_elapsed = 0.0 start_time = time.time() # We now enter the agent loop (until it returns something). while self._should_continue(iterations, time_elapsed): next_step_output = self._take_next_step( name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager=run_manager, ) if isinstance(next_step_output, AgentFinish): return self._return( next_step_output, intermediate_steps, run_manager=run_manager ) intermediate_steps.extend(next_step_output) if len(next_step_output) == 1: next_step_action = next_step_output[0] # See if tool should return directly tool_return = self._get_tool_return(next_step_action) if tool_return is not None: return self._return( tool_return, intermediate_steps, run_manager=run_manager ) iterations += 1 time_elapsed = time.time() - start_time output = self._action_agent.return_stopped_response( self.early_stopping_method, intermediate_steps, **inputs ) return self._return(output, intermediate_steps, run_manager=run_manager) async def _acall( self, inputs: Dict[str, str], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Async run text through and get agent response.""" # Construct a mapping of tool name to tool for easy lookup name_to_tool_map = {tool.name: tool for tool in self.tools} # We construct a mapping from each tool to a color, used for logging. 
color_mapping = get_color_mapping( [tool.name for tool in self.tools], excluded_colors=["green"] ) intermediate_steps: List[Tuple[AgentAction, str]] = [] # Let's start tracking the number of iterations and time elapsed iterations = 0 time_elapsed = 0.0 start_time = time.time() # We now enter the agent loop (until it returns something). try: async with asyncio_timeout(self.max_execution_time): while self._should_continue(iterations, time_elapsed): next_step_output = await self._atake_next_step( name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager=run_manager, ) if isinstance(next_step_output, AgentFinish): return await self._areturn( next_step_output, intermediate_steps, run_manager=run_manager, ) intermediate_steps.extend(next_step_output) if len(next_step_output) == 1: next_step_action = next_step_output[0] # See if tool should return directly tool_return = self._get_tool_return(next_step_action) if tool_return is not None: return await self._areturn( tool_return, intermediate_steps, run_manager=run_manager ) iterations += 1 time_elapsed = time.time() - start_time output = self._action_agent.return_stopped_response( self.early_stopping_method, intermediate_steps, **inputs ) return await self._areturn( output, intermediate_steps, run_manager=run_manager ) except (TimeoutError, asyncio.TimeoutError): # stop early when interrupted by the async timeout output = self._action_agent.return_stopped_response( self.early_stopping_method, intermediate_steps, **inputs ) return await self._areturn( output, intermediate_steps, run_manager=run_manager ) def _get_tool_return( self, next_step_output: Tuple[AgentAction, str] ) -> Optional[AgentFinish]: """Check if the tool is a returning tool.""" agent_action, observation = next_step_output name_to_tool_map = {tool.name: tool for tool in self.tools} return_value_key = "output" if len(self._action_agent.return_values) > 0: return_value_key = self._action_agent.return_values[0] # Invalid tools won't be in the map, 
so we return False. if agent_action.tool in name_to_tool_map: if name_to_tool_map[agent_action.tool].return_direct: return AgentFinish( {return_value_key: observation}, "", ) return None def _prepare_intermediate_steps( self, intermediate_steps: List[Tuple[AgentAction, str]] ) -> List[Tuple[AgentAction, str]]: if ( isinstance(self.trim_intermediate_steps, int) and self.trim_intermediate_steps > 0 ): return intermediate_steps[-self.trim_intermediate_steps :] elif callable(self.trim_intermediate_steps): return self.trim_intermediate_steps(intermediate_steps) else: return intermediate_steps def stream( self, input: Union[Dict[str, Any], Any], config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> Iterator[AddableDict]: """Enables streaming over steps taken to reach final output. Args: input: Input to the agent. config: Config to use. kwargs: Additional arguments. Yields: AddableDict: Addable dictionary. """ config = ensure_config(config) iterator = AgentExecutorIterator( self, input, config.get("callbacks"), tags=config.get("tags"), metadata=config.get("metadata"), run_name=config.get("run_name"), run_id=config.get("run_id"), yield_actions=True, **kwargs, ) for step in iterator: yield step async def astream( self, input: Union[Dict[str, Any], Any], config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> AsyncIterator[AddableDict]: """Async enables streaming over steps taken to reach final output. Args: input: Input to the agent. config: Config to use. kwargs: Additional arguments. Yields: AddableDict: Addable dictionary. """ config = ensure_config(config) iterator = AgentExecutorIterator( self, input, config.get("callbacks"), tags=config.get("tags"), metadata=config.get("metadata"), run_name=config.get("run_name"), run_id=config.get("run_id"), yield_actions=True, **kwargs, ) async for step in iterator: yield step
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/agents/schema.py
from typing import Any, Dict, List, Tuple

from langchain_core.agents import AgentAction
from langchain_core.prompts.chat import ChatPromptTemplate


class AgentScratchPadChatPromptTemplate(ChatPromptTemplate):
    """Chat prompt template for the agent scratchpad."""

    @classmethod
    def is_lc_serializable(cls) -> bool:
        # Assembled at runtime from intermediate steps; not serializable.
        return False

    def _construct_agent_scratchpad(
        self, intermediate_steps: List[Tuple[AgentAction, str]]
    ) -> str:
        """Render prior (action, observation) pairs as one scratchpad string."""
        if not intermediate_steps:
            return ""
        pieces = []
        for action, observation in intermediate_steps:
            pieces.append(action.log)
            pieces.append(f"\nObservation: {observation}\nThought: ")
        scratchpad = "".join(pieces)
        return (
            "This was your previous work "
            "(but I haven't seen any of it! I only see what "
            f"you return as final answer):\n{scratchpad}"
        )

    def _merge_partial_and_user_variables(self, **kwargs: Any) -> Dict[str, Any]:
        """Replace ``intermediate_steps`` with the rendered ``agent_scratchpad``."""
        steps = kwargs.pop("intermediate_steps")
        kwargs["agent_scratchpad"] = self._construct_agent_scratchpad(steps)
        return kwargs
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/agents/tools.py
"""Interface for tools.""" from typing import List, Optional from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain_core.tools import BaseTool, tool class InvalidTool(BaseTool): # type: ignore[override] """Tool that is run when invalid tool name is encountered by agent.""" name: str = "invalid_tool" """Name of the tool.""" description: str = "Called when tool name is invalid. Suggests valid tool names." """Description of the tool.""" def _run( self, requested_tool_name: str, available_tool_names: List[str], run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" available_tool_names_str = ", ".join([tool for tool in available_tool_names]) return ( f"{requested_tool_name} is not a valid tool, " f"try one of [{available_tool_names_str}]." ) async def _arun( self, requested_tool_name: str, available_tool_names: List[str], run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" available_tool_names_str = ", ".join([tool for tool in available_tool_names]) return ( f"{requested_tool_name} is not a valid tool, " f"try one of [{available_tool_names_str}]." ) __all__ = ["InvalidTool", "tool"]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_types.py
"""Module definitions of agent types together with corresponding agents.""" from enum import Enum from langchain_core._api import deprecated from langchain._api.deprecation import AGENT_DEPRECATION_WARNING @deprecated( "0.1.0", message=AGENT_DEPRECATION_WARNING, removal="1.0", ) class AgentType(str, Enum): """An enum for agent types. See documentation: https://python.langchain.com/docs/modules/agents/agent_types/ """ ZERO_SHOT_REACT_DESCRIPTION = "zero-shot-react-description" """A zero shot agent that does a reasoning step before acting.""" REACT_DOCSTORE = "react-docstore" """A zero shot agent that does a reasoning step before acting. This agent has access to a document store that allows it to look up relevant information to answering the question. """ SELF_ASK_WITH_SEARCH = "self-ask-with-search" """An agent that breaks down a complex question into a series of simpler questions. This agent uses a search tool to look up answers to the simpler questions in order to answer the original complex question. """ CONVERSATIONAL_REACT_DESCRIPTION = "conversational-react-description" CHAT_ZERO_SHOT_REACT_DESCRIPTION = "chat-zero-shot-react-description" """A zero shot agent that does a reasoning step before acting. This agent is designed to be used in conjunction """ CHAT_CONVERSATIONAL_REACT_DESCRIPTION = "chat-conversational-react-description" STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION = ( "structured-chat-zero-shot-react-description" ) """An zero-shot react agent optimized for chat models. This agent is capable of invoking tools that have multiple inputs. """ OPENAI_FUNCTIONS = "openai-functions" """An agent optimized for using open AI functions.""" OPENAI_MULTI_FUNCTIONS = "openai-multi-functions"
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/agents/utils.py
from typing import Sequence

from langchain_core.tools import BaseTool


def validate_tools_single_input(class_name: str, tools: Sequence[BaseTool]) -> None:
    """Ensure every tool in *tools* accepts a single input.

    Args:
        class_name: Name of the class.
        tools: List of tools to validate.

    Raises:
        ValueError: If a multi-input tool is found in tools.
    """
    # Collect offenders, then report the first one (same tool the original
    # short-circuiting loop would have raised on).
    multi_input_tools = [t for t in tools if not t.is_single_input]
    if multi_input_tools:
        offender = multi_input_tools[0]
        raise ValueError(
            f"{class_name} does not support multi-input tool {offender.name}."
        )
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/agents/__init__.py
""" **Agent** is a class that uses an LLM to choose a sequence of actions to take. In Chains, a sequence of actions is hardcoded. In Agents, a language model is used as a reasoning engine to determine which actions to take and in which order. Agents select and use **Tools** and **Toolkits** for actions. **Class hierarchy:** .. code-block:: BaseSingleActionAgent --> LLMSingleActionAgent OpenAIFunctionsAgent XMLAgent Agent --> <name>Agent # Examples: ZeroShotAgent, ChatAgent BaseMultiActionAgent --> OpenAIMultiFunctionsAgent **Main helpers:** .. code-block:: AgentType, AgentExecutor, AgentOutputParser, AgentExecutorIterator, AgentAction, AgentFinish """ # noqa: E501 from pathlib import Path from typing import TYPE_CHECKING, Any from langchain_core._api.path import as_import_path from langchain_core.tools import Tool from langchain_core.tools.convert import tool from langchain._api import create_importer from langchain.agents.agent import ( Agent, AgentExecutor, AgentOutputParser, BaseMultiActionAgent, BaseSingleActionAgent, LLMSingleActionAgent, ) from langchain.agents.agent_iterator import AgentExecutorIterator from langchain.agents.agent_toolkits.vectorstore.base import ( create_vectorstore_agent, create_vectorstore_router_agent, ) from langchain.agents.agent_types import AgentType from langchain.agents.conversational.base import ConversationalAgent from langchain.agents.conversational_chat.base import ConversationalChatAgent from langchain.agents.initialize import initialize_agent from langchain.agents.json_chat.base import create_json_chat_agent from langchain.agents.loading import load_agent from langchain.agents.mrkl.base import MRKLChain, ZeroShotAgent from langchain.agents.openai_functions_agent.base import ( OpenAIFunctionsAgent, create_openai_functions_agent, ) from langchain.agents.openai_functions_multi_agent.base import OpenAIMultiFunctionsAgent from langchain.agents.openai_tools.base import create_openai_tools_agent from langchain.agents.react.agent 
import create_react_agent from langchain.agents.react.base import ReActChain, ReActTextWorldAgent from langchain.agents.self_ask_with_search.base import ( SelfAskWithSearchChain, create_self_ask_with_search_agent, ) from langchain.agents.structured_chat.base import ( StructuredChatAgent, create_structured_chat_agent, ) from langchain.agents.tool_calling_agent.base import create_tool_calling_agent from langchain.agents.xml.base import XMLAgent, create_xml_agent if TYPE_CHECKING: from langchain_community.agent_toolkits.json.base import create_json_agent from langchain_community.agent_toolkits.load_tools import ( get_all_tool_names, load_huggingface_tool, load_tools, ) from langchain_community.agent_toolkits.openapi.base import create_openapi_agent from langchain_community.agent_toolkits.powerbi.base import create_pbi_agent from langchain_community.agent_toolkits.powerbi.chat_base import ( create_pbi_chat_agent, ) from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent from langchain_community.agent_toolkits.sql.base import create_sql_agent DEPRECATED_CODE = [ "create_csv_agent", "create_pandas_dataframe_agent", "create_spark_dataframe_agent", "create_xorbits_agent", ] # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optional imports. 
DEPRECATED_LOOKUP = { "create_json_agent": "langchain_community.agent_toolkits.json.base", "create_openapi_agent": "langchain_community.agent_toolkits.openapi.base", "create_pbi_agent": "langchain_community.agent_toolkits.powerbi.base", "create_pbi_chat_agent": "langchain_community.agent_toolkits.powerbi.chat_base", "create_spark_sql_agent": "langchain_community.agent_toolkits.spark_sql.base", "create_sql_agent": "langchain_community.agent_toolkits.sql.base", "load_tools": "langchain_community.agent_toolkits.load_tools", "load_huggingface_tool": "langchain_community.agent_toolkits.load_tools", "get_all_tool_names": "langchain_community.agent_toolkits.load_tools", } _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) def __getattr__(name: str) -> Any: """Get attr name.""" if name in DEPRECATED_CODE: # Get directory of langchain package HERE = Path(__file__).parents[1] relative_path = as_import_path( Path(__file__).parent, suffix=name, relative_to=HERE ) old_path = "langchain." + relative_path new_path = "langchain_experimental." + relative_path raise ImportError( f"{name} has been moved to langchain experimental. " "See https://github.com/langchain-ai/langchain/discussions/11680" "for more information.\n" f"Please update your import statement from: `{old_path}` to `{new_path}`." 
) return _import_attribute(name) __all__ = [ "Agent", "AgentExecutor", "AgentExecutorIterator", "AgentOutputParser", "AgentType", "BaseMultiActionAgent", "BaseSingleActionAgent", "ConversationalAgent", "ConversationalChatAgent", "LLMSingleActionAgent", "MRKLChain", "OpenAIFunctionsAgent", "OpenAIMultiFunctionsAgent", "ReActChain", "ReActTextWorldAgent", "SelfAskWithSearchChain", "StructuredChatAgent", "ZeroShotAgent", "create_json_agent", "create_openapi_agent", "create_pbi_agent", "create_pbi_chat_agent", "create_spark_sql_agent", "create_sql_agent", "create_vectorstore_agent", "create_vectorstore_router_agent", "get_all_tool_names", "initialize_agent", "load_agent", "load_huggingface_tool", "load_tools", "XMLAgent", "create_openai_functions_agent", "create_xml_agent", "create_react_agent", "create_openai_tools_agent", "create_self_ask_with_search_agent", "create_json_chat_agent", "create_structured_chat_agent", "create_tool_calling_agent", "Tool", "tool", ]
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/agents/load_tools.py
from typing import Any

from langchain._api import create_importer

# Thin compatibility shim: the real implementations live in
# ``langchain_community.agent_toolkits.load_tools``. Attribute access on this
# module is forwarded there via PEP 562's module-level ``__getattr__``.
_importer = create_importer(
    __package__,
    fallback_module="langchain_community.agent_toolkits.load_tools",
)


def __getattr__(name: str) -> Any:
    """Resolve ``name`` dynamically through the community fallback module."""
    return _importer(name)
0
lc_public_repos/langchain/libs/langchain/langchain
lc_public_repos/langchain/libs/langchain/langchain/agents/loading.py
"""Functionality for loading agents.""" import json import logging from pathlib import Path from typing import Any, List, Optional, Union import yaml from langchain_core._api import deprecated from langchain_core.language_models import BaseLanguageModel from langchain_core.tools import Tool from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent from langchain.agents.types import AGENT_TO_CLASS from langchain.chains.loading import load_chain, load_chain_from_config logger = logging.getLogger(__file__) URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/" def _load_agent_from_tools( config: dict, llm: BaseLanguageModel, tools: List[Tool], **kwargs: Any ) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: config_type = config.pop("_type") if config_type not in AGENT_TO_CLASS: raise ValueError(f"Loading {config_type} agent not supported") agent_cls = AGENT_TO_CLASS[config_type] combined_config = {**config, **kwargs} return agent_cls.from_llm_and_tools(llm, tools, **combined_config) @deprecated("0.1.0", removal="1.0") def load_agent_from_config( config: dict, llm: Optional[BaseLanguageModel] = None, tools: Optional[List[Tool]] = None, **kwargs: Any, ) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: """Load agent from Config Dict. Args: config: Config dict to load agent from. llm: Language model to use as the agent. tools: List of tools this agent has access to. kwargs: Additional keyword arguments passed to the agent executor. Returns: An agent executor. Raises: ValueError: If agent type is not specified in the config. 
""" if "_type" not in config: raise ValueError("Must specify an agent Type in config") load_from_tools = config.pop("load_from_llm_and_tools", False) if load_from_tools: if llm is None: raise ValueError( "If `load_from_llm_and_tools` is set to True, " "then LLM must be provided" ) if tools is None: raise ValueError( "If `load_from_llm_and_tools` is set to True, " "then tools must be provided" ) return _load_agent_from_tools(config, llm, tools, **kwargs) config_type = config.pop("_type") if config_type not in AGENT_TO_CLASS: raise ValueError(f"Loading {config_type} agent not supported") agent_cls = AGENT_TO_CLASS[config_type] if "llm_chain" in config: config["llm_chain"] = load_chain_from_config(config.pop("llm_chain")) elif "llm_chain_path" in config: config["llm_chain"] = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.") if "output_parser" in config: logger.warning( "Currently loading output parsers on agent is not supported, " "will just use the default one." ) del config["output_parser"] combined_config = {**config, **kwargs} return agent_cls(**combined_config) # type: ignore @deprecated("0.1.0", removal="1.0") def load_agent( path: Union[str, Path], **kwargs: Any ) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: """Unified method for loading an agent from LangChainHub or local fs. Args: path: Path to the agent file. kwargs: Additional keyword arguments passed to the agent executor. Returns: An agent executor. Raises: RuntimeError: If loading from the deprecated github-based Hub is attempted. """ if isinstance(path, str) and path.startswith("lc://"): raise RuntimeError( "Loading from the deprecated github-based Hub is no longer supported. " "Please use the new LangChain Hub at https://smith.langchain.com/hub " "instead." 
) return _load_agent_from_file(path, **kwargs) def _load_agent_from_file( file: Union[str, Path], **kwargs: Any ) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: """Load agent from file.""" valid_suffixes = {"json", "yaml"} # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix[1:] == "json": with open(file_path) as f: config = json.load(f) elif file_path.suffix[1:] == "yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.") # Load the agent from the config now. return load_agent_from_config(config, **kwargs)
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/conversational_chat/base.py
"""An agent designed to hold a conversation in addition to using tools.""" from __future__ import annotations from typing import Any, List, Optional, Sequence, Tuple from langchain_core._api import deprecated from langchain_core.agents import AgentAction from langchain_core.callbacks import BaseCallbackManager from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import AIMessage, BaseMessage, HumanMessage from langchain_core.output_parsers import BaseOutputParser from langchain_core.prompts import BasePromptTemplate from langchain_core.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, ) from langchain_core.tools import BaseTool from pydantic import Field from langchain.agents.agent import Agent, AgentOutputParser from langchain.agents.conversational_chat.output_parser import ConvoOutputParser from langchain.agents.conversational_chat.prompt import ( PREFIX, SUFFIX, TEMPLATE_TOOL_RESPONSE, ) from langchain.agents.utils import validate_tools_single_input from langchain.chains import LLMChain @deprecated("0.1.0", alternative="create_json_chat_agent", removal="1.0") class ConversationalChatAgent(Agent): """An agent designed to hold a conversation in addition to using tools.""" output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser) """Output parser for the agent.""" template_tool_response: str = TEMPLATE_TOOL_RESPONSE """Template for the tool response.""" @classmethod def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: return ConvoOutputParser() @property def _agent_type(self) -> str: raise NotImplementedError @property def observation_prefix(self) -> str: """Prefix to append the observation with. Returns: "Observation: " """ return "Observation: " @property def llm_prefix(self) -> str: """Prefix to append the llm call with. 
Returns: "Thought: " """ return "Thought:" @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: super()._validate_tools(tools) validate_tools_single_input(cls.__name__, tools) @classmethod def create_prompt( cls, tools: Sequence[BaseTool], system_message: str = PREFIX, human_message: str = SUFFIX, input_variables: Optional[List[str]] = None, output_parser: Optional[BaseOutputParser] = None, ) -> BasePromptTemplate: """Create a prompt for the agent. Args: tools: The tools to use. system_message: The system message to use. Defaults to the PREFIX. human_message: The human message to use. Defaults to the SUFFIX. input_variables: The input variables to use. Defaults to None. output_parser: The output parser to use. Defaults to None. Returns: A PromptTemplate. """ tool_strings = "\n".join( [f"> {tool.name}: {tool.description}" for tool in tools] ) tool_names = ", ".join([tool.name for tool in tools]) _output_parser = output_parser or cls._get_default_output_parser() format_instructions = human_message.format( format_instructions=_output_parser.get_format_instructions() ) final_prompt = format_instructions.format( tool_names=tool_names, tools=tool_strings ) if input_variables is None: input_variables = ["input", "chat_history", "agent_scratchpad"] messages = [ SystemMessagePromptTemplate.from_template(system_message), MessagesPlaceholder(variable_name="chat_history"), HumanMessagePromptTemplate.from_template(final_prompt), MessagesPlaceholder(variable_name="agent_scratchpad"), ] return ChatPromptTemplate(input_variables=input_variables, messages=messages) # type: ignore[arg-type] def _construct_scratchpad( self, intermediate_steps: List[Tuple[AgentAction, str]] ) -> List[BaseMessage]: """Construct the scratchpad that lets the agent continue its thought process.""" thoughts: List[BaseMessage] = [] for action, observation in intermediate_steps: thoughts.append(AIMessage(content=action.log)) human_message = HumanMessage( 
content=self.template_tool_response.format(observation=observation) ) thoughts.append(human_message) return thoughts @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = None, system_message: str = PREFIX, human_message: str = SUFFIX, input_variables: Optional[List[str]] = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools. Args: llm: The language model to use. tools: A list of tools to use. callback_manager: The callback manager to use. Default is None. output_parser: The output parser to use. Default is None. system_message: The system message to use. Default is PREFIX. human_message: The human message to use. Default is SUFFIX. input_variables: The input variables to use. Default is None. **kwargs: Any additional arguments. Returns: An agent. """ cls._validate_tools(tools) _output_parser = output_parser or cls._get_default_output_parser() prompt = cls.create_prompt( tools, system_message=system_message, human_message=human_message, input_variables=input_variables, output_parser=_output_parser, ) llm_chain = LLMChain( # type: ignore[misc] llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] return cls( llm_chain=llm_chain, allowed_tools=tool_names, output_parser=_output_parser, **kwargs, )
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/conversational_chat/output_parser.py
from __future__ import annotations from typing import Union from langchain_core.agents import AgentAction, AgentFinish from langchain_core.exceptions import OutputParserException from langchain_core.utils.json import parse_json_markdown from langchain.agents import AgentOutputParser from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS # Define a class that parses output for conversational agents class ConvoOutputParser(AgentOutputParser): """Output parser for the conversational agent.""" format_instructions: str = FORMAT_INSTRUCTIONS """Default formatting instructions""" def get_format_instructions(self) -> str: """Returns formatting instructions for the given output parser.""" return self.format_instructions def parse(self, text: str) -> Union[AgentAction, AgentFinish]: """Attempts to parse the given text into an AgentAction or AgentFinish. Raises: OutputParserException if parsing fails. """ try: # Attempt to parse the text into a structured format (assumed to be JSON # stored as markdown) response = parse_json_markdown(text) # If the response contains an 'action' and 'action_input' if "action" in response and "action_input" in response: action, action_input = response["action"], response["action_input"] # If the action indicates a final answer, return an AgentFinish if action == "Final Answer": return AgentFinish({"output": action_input}, text) else: # Otherwise, return an AgentAction with the specified action and # input return AgentAction(action, action_input, text) else: # If the necessary keys aren't present in the response, raise an # exception raise OutputParserException( f"Missing 'action' or 'action_input' in LLM output: {text}" ) except Exception as e: # If any other exception is raised during parsing, also raise an # OutputParserException raise OutputParserException(f"Could not parse LLM output: {text}") from e @property def _type(self) -> str: return "conversational_chat"
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/conversational_chat/__init__.py
"""An agent designed to hold a conversation in addition to using tools."""
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/conversational_chat/prompt.py
# flake8: noqa PREFIX = """Assistant is a large language model trained by OpenAI. Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics. Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.""" FORMAT_INSTRUCTIONS = """RESPONSE FORMAT INSTRUCTIONS ---------------------------- When responding to me, please output a response in one of two formats: **Option 1:** Use this if you want the human to use a tool. Markdown code snippet formatted in the following schema: ```json {{{{ "action": string, \\\\ The action to take. Must be one of {tool_names} "action_input": string \\\\ The input to the action }}}} ``` **Option #2:** Use this if you want to respond directly to the human. 
Markdown code snippet formatted in the following schema: ```json {{{{ "action": "Final Answer", "action_input": string \\\\ You should put what you want to return to use here }}}} ```""" SUFFIX = """TOOLS ------ Assistant can ask the user to use tools to look up information that may be helpful in answering the users original question. The tools the human can use are: {{tools}} {format_instructions} USER'S INPUT -------------------- Here is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else): {{{{input}}}}""" TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE: --------------------- {observation} USER'S INPUT -------------------- Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else."""
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/openai_assistant/base.py
from __future__ import annotations

import asyncio
import json
from json import JSONDecodeError
from time import sleep
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    List,
    Optional,
    Sequence,
    Tuple,
    Type,
    Union,
)

from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import CallbackManager
from langchain_core.load import dumpd
from langchain_core.runnables import RunnableConfig, RunnableSerializable, ensure_config
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import BaseModel, Field, model_validator
from typing_extensions import Self

if TYPE_CHECKING:
    # Imported only for type checking so the module loads without `openai`
    # installed; runtime imports happen lazily inside the client factories.
    import openai
    from openai.types.beta.threads import ThreadMessage
    from openai.types.beta.threads.required_action_function_tool_call import (
        RequiredActionFunctionToolCall,
    )


class OpenAIAssistantFinish(AgentFinish):
    """AgentFinish with run and thread metadata.

    Parameters:
        run_id: Run id.
        thread_id: Thread id.
    """

    run_id: str
    thread_id: str

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Check if the class is serializable by LangChain.

        Returns: False
        """
        return False


class OpenAIAssistantAction(AgentAction):
    """AgentAction with info needed to submit custom tool output to existing run.

    Parameters:
        tool_call_id: Tool call id.
        run_id: Run id.
        thread_id: Thread id
    """

    tool_call_id: str
    run_id: str
    thread_id: str

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Check if the class is serializable by LangChain.

        Returns: False
        """
        return False


def _get_openai_client() -> openai.OpenAI:
    """Create a default sync OpenAI client, with helpful install/version errors."""
    try:
        import openai

        return openai.OpenAI()
    except ImportError as e:
        raise ImportError(
            "Unable to import openai, please install with `pip install openai`."
        ) from e
    except AttributeError as e:
        # openai<1.0 has no `OpenAI` class, so attribute access fails.
        raise AttributeError(
            "Please make sure you are using a v1.1-compatible version of openai. You "
            'can install with `pip install "openai>=1.1"`.'
        ) from e


def _get_openai_async_client() -> openai.AsyncOpenAI:
    """Create a default async OpenAI client, with helpful install/version errors."""
    try:
        import openai

        return openai.AsyncOpenAI()
    except ImportError as e:
        raise ImportError(
            "Unable to import openai, please install with `pip install openai`."
        ) from e
    except AttributeError as e:
        raise AttributeError(
            "Please make sure you are using a v1.1-compatible version of openai. You "
            'can install with `pip install "openai>=1.1"`.'
        ) from e


def _is_assistants_builtin_tool(
    tool: Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool],
) -> bool:
    """Determine if tool corresponds to OpenAI Assistants built-in."""
    assistants_builtin_tools = ("code_interpreter", "retrieval")
    return (
        isinstance(tool, dict)
        and ("type" in tool)
        and (tool["type"] in assistants_builtin_tools)
    )


def _get_assistants_tool(
    tool: Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool],
) -> Dict[str, Any]:
    """Convert a raw function/class to an OpenAI tool.

    Note that OpenAI assistants supports several built-in tools,
    such as "code_interpreter" and "retrieval."
    """
    if _is_assistants_builtin_tool(tool):
        return tool  # type: ignore
    else:
        return convert_to_openai_tool(tool)


# Union of everything invoke/ainvoke can return, depending on `as_agent` and
# the run status (completed vs. requires_action).
OutputType = Union[
    List[OpenAIAssistantAction],
    OpenAIAssistantFinish,
    List["ThreadMessage"],
    List["RequiredActionFunctionToolCall"],
]


class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
    """Run an OpenAI Assistant.

    Example using OpenAI tools:
        .. code-block:: python

            from langchain_experimental.openai_assistant import OpenAIAssistantRunnable

            interpreter_assistant = OpenAIAssistantRunnable.create_assistant(
                name="langchain assistant",
                instructions="You are a personal math tutor. Write and run code to answer math questions.",
                tools=[{"type": "code_interpreter"}],
                model="gpt-4-1106-preview"
            )
            output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"})

    Example using custom tools and AgentExecutor:
        .. code-block:: python

            from langchain_experimental.openai_assistant import OpenAIAssistantRunnable
            from langchain.agents import AgentExecutor
            from langchain.tools import E2BDataAnalysisTool

            tools = [E2BDataAnalysisTool(api_key="...")]
            agent = OpenAIAssistantRunnable.create_assistant(
                name="langchain assistant e2b tool",
                instructions="You are a personal math tutor. Write and run code to answer math questions.",
                tools=tools,
                model="gpt-4-1106-preview",
                as_agent=True
            )

            agent_executor = AgentExecutor(agent=agent, tools=tools)
            agent_executor.invoke({"content": "What's 10 - 4 raised to the 2.7"})

    Example using custom tools and custom execution:
        .. code-block:: python

            from langchain_experimental.openai_assistant import OpenAIAssistantRunnable
            from langchain.agents import AgentExecutor
            from langchain_core.agents import AgentFinish
            from langchain.tools import E2BDataAnalysisTool

            tools = [E2BDataAnalysisTool(api_key="...")]
            agent = OpenAIAssistantRunnable.create_assistant(
                name="langchain assistant e2b tool",
                instructions="You are a personal math tutor. Write and run code to answer math questions.",
                tools=tools,
                model="gpt-4-1106-preview",
                as_agent=True
            )

            def execute_agent(agent, tools, input):
                tool_map = {tool.name: tool for tool in tools}
                response = agent.invoke(input)
                while not isinstance(response, AgentFinish):
                    tool_outputs = []
                    for action in response:
                        tool_output = tool_map[action.tool].invoke(action.tool_input)
                        tool_outputs.append({"output": tool_output, "tool_call_id": action.tool_call_id})
                    response = agent.invoke(
                        {
                            "tool_outputs": tool_outputs,
                            "run_id": action.run_id,
                            "thread_id": action.thread_id
                        }
                    )

                return response

            response = execute_agent(agent, tools, {"content": "What's 10 - 4 raised to the 2.7"})
            next_response = execute_agent(agent, tools, {"content": "now add 17.241", "thread_id": response.thread_id})

    """  # noqa: E501

    client: Any = Field(default_factory=_get_openai_client)
    """OpenAI or AzureOpenAI client."""
    async_client: Any = None
    """OpenAI or AzureOpenAI async client."""
    assistant_id: str
    """OpenAI assistant id."""
    check_every_ms: float = 1_000.0
    """Frequency with which to check run progress in ms."""
    as_agent: bool = False
    """Use as a LangChain agent, compatible with the AgentExecutor."""

    @model_validator(mode="after")
    def validate_async_client(self) -> Self:
        # Lazily build an async client mirroring the sync client's API key so
        # `ainvoke` works even when only `client` was configured.
        if self.async_client is None:
            import openai

            api_key = self.client.api_key
            self.async_client = openai.AsyncOpenAI(api_key=api_key)
        return self

    @classmethod
    def create_assistant(
        cls,
        name: str,
        instructions: str,
        tools: Sequence[Union[BaseTool, dict]],
        model: str,
        *,
        client: Optional[Union[openai.OpenAI, openai.AzureOpenAI]] = None,
        **kwargs: Any,
    ) -> OpenAIAssistantRunnable:
        """Create an OpenAI Assistant and instantiate the Runnable.

        Args:
            name: Assistant name.
            instructions: Assistant instructions.
            tools: Assistant tools. Can be passed in OpenAI format or as BaseTools.
            model: Assistant model to use.
            client: OpenAI or AzureOpenAI client.
                Will create a default OpenAI client if not specified.
            kwargs: Additional arguments.

        Returns:
            OpenAIAssistantRunnable configured to run using the created assistant.
        """
        client = client or _get_openai_client()
        assistant = client.beta.assistants.create(
            name=name,
            instructions=instructions,
            tools=[_get_assistants_tool(tool) for tool in tools],  # type: ignore
            model=model,
        )
        return cls(assistant_id=assistant.id, client=client, **kwargs)

    def invoke(
        self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
    ) -> OutputType:
        """Invoke assistant.

        Args:
            input: Runnable input dict that can have:
                content: User message when starting a new run.
                thread_id: Existing thread to use.
                run_id: Existing run to use. Should only be supplied when providing
                    the tool output for a required action after an initial invocation.
                message_metadata: Metadata to associate with new message.
                thread_metadata: Metadata to associate with new thread. Only relevant
                    when new thread being created.
                instructions: Additional run instructions.
                model: Override Assistant model for this run.
                tools: Override Assistant tools for this run.
                run_metadata: Metadata to associate with new run.
            config: Runnable config. Defaults to None.

        Return:
            If self.as_agent, will return
                Union[List[OpenAIAssistantAction], OpenAIAssistantFinish].
                Otherwise, will return OpenAI types
                Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]].
        """
        config = ensure_config(config)
        callback_manager = CallbackManager.configure(
            inheritable_callbacks=config.get("callbacks"),
            inheritable_tags=config.get("tags"),
            inheritable_metadata=config.get("metadata"),
        )
        run_manager = callback_manager.on_chain_start(
            dumpd(self), input, name=config.get("run_name") or self.get_name()
        )
        try:
            # Being run within AgentExecutor and there are tool outputs to submit.
            if self.as_agent and input.get("intermediate_steps"):
                tool_outputs = self._parse_intermediate_steps(
                    input["intermediate_steps"]
                )
                run = self.client.beta.threads.runs.submit_tool_outputs(**tool_outputs)
            # Starting a new thread and a new run.
            elif "thread_id" not in input:
                thread = {
                    "messages": [
                        {
                            "role": "user",
                            "content": input["content"],
                            "metadata": input.get("message_metadata"),
                        }
                    ],
                    "metadata": input.get("thread_metadata"),
                }
                run = self._create_thread_and_run(input, thread)
            # Starting a new run in an existing thread.
            elif "run_id" not in input:
                _ = self.client.beta.threads.messages.create(
                    input["thread_id"],
                    content=input["content"],
                    role="user",
                    metadata=input.get("message_metadata"),
                )
                run = self._create_run(input)
            # Submitting tool outputs to an existing run, outside the AgentExecutor
            # framework.
            else:
                run = self.client.beta.threads.runs.submit_tool_outputs(**input)
            # Block (polling) until the run leaves in_progress/queued.
            run = self._wait_for_run(run.id, run.thread_id)
        except BaseException as e:
            run_manager.on_chain_error(e)
            raise e
        try:
            response = self._get_response(run)
        except BaseException as e:
            run_manager.on_chain_error(e, metadata=run.dict())
            raise e
        else:
            run_manager.on_chain_end(response)
            return response

    @classmethod
    async def acreate_assistant(
        cls,
        name: str,
        instructions: str,
        tools: Sequence[Union[BaseTool, dict]],
        model: str,
        *,
        async_client: Optional[
            Union[openai.AsyncOpenAI, openai.AsyncAzureOpenAI]
        ] = None,
        **kwargs: Any,
    ) -> OpenAIAssistantRunnable:
        """Async create an AsyncOpenAI Assistant and instantiate the Runnable.

        Args:
            name: Assistant name.
            instructions: Assistant instructions.
            tools: Assistant tools. Can be passed in OpenAI format or as BaseTools.
            model: Assistant model to use.
            async_client: AsyncOpenAI client.
                Will create default async_client if not specified.

        Returns:
            AsyncOpenAIAssistantRunnable configured to run using the created assistant.
        """
        async_client = async_client or _get_openai_async_client()
        openai_tools = [_get_assistants_tool(tool) for tool in tools]

        assistant = await async_client.beta.assistants.create(
            name=name,
            instructions=instructions,
            tools=openai_tools,  # type: ignore
            model=model,
        )
        return cls(assistant_id=assistant.id, async_client=async_client, **kwargs)

    async def ainvoke(
        self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
    ) -> OutputType:
        """Async invoke assistant.

        Args:
            input: Runnable input dict that can have:
                content: User message when starting a new run.
                thread_id: Existing thread to use.
                run_id: Existing run to use. Should only be supplied when providing
                    the tool output for a required action after an initial invocation.
                message_metadata: Metadata to associate with a new message.
                thread_metadata: Metadata to associate with new thread. Only relevant
                    when a new thread is created.
                instructions: Additional run instructions.
                model: Override Assistant model for this run.
                tools: Override Assistant tools for this run.
                run_metadata: Metadata to associate with new run.
            config: Runnable config. Defaults to None.
            kwargs: Additional arguments.

        Return:
            If self.as_agent, will return
                Union[List[OpenAIAssistantAction], OpenAIAssistantFinish].
                Otherwise, will return OpenAI types
                Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]].
        """
        # NOTE(review): unlike invoke(), this uses `config or {}` rather than
        # ensure_config(config) — confirm whether that asymmetry is intended.
        config = config or {}
        callback_manager = CallbackManager.configure(
            inheritable_callbacks=config.get("callbacks"),
            inheritable_tags=config.get("tags"),
            inheritable_metadata=config.get("metadata"),
        )
        run_manager = callback_manager.on_chain_start(
            dumpd(self), input, name=config.get("run_name") or self.get_name()
        )
        try:
            # Being run within AgentExecutor and there are tool outputs to submit.
            if self.as_agent and input.get("intermediate_steps"):
                tool_outputs = await self._aparse_intermediate_steps(
                    input["intermediate_steps"]
                )
                run = await self.async_client.beta.threads.runs.submit_tool_outputs(
                    **tool_outputs
                )
            # Starting a new thread and a new run.
            elif "thread_id" not in input:
                thread = {
                    "messages": [
                        {
                            "role": "user",
                            "content": input["content"],
                            "metadata": input.get("message_metadata"),
                        }
                    ],
                    "metadata": input.get("thread_metadata"),
                }
                run = await self._acreate_thread_and_run(input, thread)
            # Starting a new run in an existing thread.
            elif "run_id" not in input:
                _ = await self.async_client.beta.threads.messages.create(
                    input["thread_id"],
                    content=input["content"],
                    role="user",
                    metadata=input.get("message_metadata"),
                )
                run = await self._acreate_run(input)
            # Submitting tool outputs to an existing run, outside the AgentExecutor
            # framework.
            else:
                run = await self.async_client.beta.threads.runs.submit_tool_outputs(
                    **input
                )
            run = await self._await_for_run(run.id, run.thread_id)
        except BaseException as e:
            run_manager.on_chain_error(e)
            raise e
        try:
            # NOTE(review): this calls the *sync* _get_response (which uses the
            # sync client) even though _aget_response exists below and is never
            # used — looks like a bug; confirm against upstream intent.
            response = self._get_response(run)
        except BaseException as e:
            run_manager.on_chain_error(e, metadata=run.dict())
            raise e
        else:
            run_manager.on_chain_end(response)
            return response

    def _parse_intermediate_steps(
        self, intermediate_steps: List[Tuple[OpenAIAssistantAction, str]]
    ) -> dict:
        # Build the submit_tool_outputs payload from AgentExecutor steps,
        # keeping only outputs the current run is still waiting on.
        last_action, last_output = intermediate_steps[-1]
        run = self._wait_for_run(last_action.run_id, last_action.thread_id)
        required_tool_call_ids = set()
        if run.required_action:
            required_tool_call_ids = {
                tc.id for tc in run.required_action.submit_tool_outputs.tool_calls
            }
        tool_outputs = [
            {"output": str(output), "tool_call_id": action.tool_call_id}
            for action, output in intermediate_steps
            if action.tool_call_id in required_tool_call_ids
        ]
        submit_tool_outputs = {
            "tool_outputs": tool_outputs,
            "run_id": last_action.run_id,
            "thread_id": last_action.thread_id,
        }
        return submit_tool_outputs

    def _create_run(self, input: dict) -> Any:
        # Forward only the run-level overrides the Runs API accepts.
        params = {
            k: v
            for k, v in input.items()
            if k in ("instructions", "model", "tools", "run_metadata")
        }
        return self.client.beta.threads.runs.create(
            input["thread_id"],
            assistant_id=self.assistant_id,
            **params,
        )

    def _create_thread_and_run(self, input: dict, thread: dict) -> Any:
        # Create a thread and kick off a run in one API call.
        params = {
            k: v
            for k, v in input.items()
            if k in ("instructions", "model", "tools", "run_metadata")
        }
        run = self.client.beta.threads.create_and_run(
            assistant_id=self.assistant_id,
            thread=thread,
            **params,
        )
        return run

    def _get_response(self, run: Any) -> Any:
        # Translate a finished/paused run into this Runnable's output type.
        # TODO: Pagination
        if run.status == "completed":
            import openai

            # openai>=1.14 renamed MessageContentText -> TextContentBlock.
            major_version = int(openai.version.VERSION.split(".")[0])
            minor_version = int(openai.version.VERSION.split(".")[1])
            version_gte_1_14 = (major_version > 1) or (
                major_version == 1 and minor_version >= 14
            )

            messages = self.client.beta.threads.messages.list(
                run.thread_id, order="asc"
            )
            new_messages = [msg for msg in messages if msg.run_id == run.id]
            if not self.as_agent:
                return new_messages
            answer: Any = [
                msg_content for msg in new_messages for msg_content in msg.content
            ]
            if all(
                (
                    isinstance(content, openai.types.beta.threads.TextContentBlock)
                    if version_gte_1_14
                    else isinstance(
                        content, openai.types.beta.threads.MessageContentText
                    )
                )
                for content in answer
            ):
                answer = "\n".join(content.text.value for content in answer)
            return OpenAIAssistantFinish(
                return_values={
                    "output": answer,
                    "thread_id": run.thread_id,
                    "run_id": run.id,
                },
                log="",
                run_id=run.id,
                thread_id=run.thread_id,
            )
        elif run.status == "requires_action":
            if not self.as_agent:
                return run.required_action.submit_tool_outputs.tool_calls
            actions = []
            for tool_call in run.required_action.submit_tool_outputs.tool_calls:
                function = tool_call.function
                try:
                    args = json.loads(function.arguments, strict=False)
                except JSONDecodeError as e:
                    raise ValueError(
                        f"Received invalid JSON function arguments: "
                        f"{function.arguments} for function {function.name}"
                    ) from e
                # Single-input tools are wrapped as {"__arg1": value}; unwrap.
                if len(args) == 1 and "__arg1" in args:
                    args = args["__arg1"]
                actions.append(
                    OpenAIAssistantAction(
                        tool=function.name,
                        tool_input=args,
                        tool_call_id=tool_call.id,
                        log="",
                        run_id=run.id,
                        thread_id=run.thread_id,
                    )
                )
            return actions
        else:
            run_info = json.dumps(run.dict(), indent=2)
            raise ValueError(
                f"Unexpected run status: {run.status}. Full run info:\n\n{run_info})"
            )

    def _wait_for_run(self, run_id: str, thread_id: str) -> Any:
        # Poll the run every `check_every_ms` until it is no longer pending.
        in_progress = True
        while in_progress:
            run = self.client.beta.threads.runs.retrieve(run_id, thread_id=thread_id)
            in_progress = run.status in ("in_progress", "queued")
            if in_progress:
                sleep(self.check_every_ms / 1000)
        return run

    async def _aparse_intermediate_steps(
        self, intermediate_steps: List[Tuple[OpenAIAssistantAction, str]]
    ) -> dict:
        last_action, last_output = intermediate_steps[-1]
        # NOTE(review): awaits the *sync* _wait_for_run (returns a plain run,
        # not an awaitable) — should presumably be self._await_for_run; confirm.
        run = await self._wait_for_run(last_action.run_id, last_action.thread_id)
        required_tool_call_ids = set()
        if run.required_action:
            required_tool_call_ids = {
                tc.id for tc in run.required_action.submit_tool_outputs.tool_calls
            }
        tool_outputs = [
            {"output": str(output), "tool_call_id": action.tool_call_id}
            for action, output in intermediate_steps
            if action.tool_call_id in required_tool_call_ids
        ]
        submit_tool_outputs = {
            "tool_outputs": tool_outputs,
            "run_id": last_action.run_id,
            "thread_id": last_action.thread_id,
        }
        return submit_tool_outputs

    async def _acreate_run(self, input: dict) -> Any:
        # Async counterpart of _create_run.
        params = {
            k: v
            for k, v in input.items()
            if k in ("instructions", "model", "tools", "run_metadata")
        }
        return await self.async_client.beta.threads.runs.create(
            input["thread_id"],
            assistant_id=self.assistant_id,
            **params,
        )

    async def _acreate_thread_and_run(self, input: dict, thread: dict) -> Any:
        # Async counterpart of _create_thread_and_run.
        params = {
            k: v
            for k, v in input.items()
            if k in ("instructions", "model", "tools", "run_metadata")
        }
        run = await self.async_client.beta.threads.create_and_run(
            assistant_id=self.assistant_id,
            thread=thread,
            **params,
        )
        return run

    async def _aget_response(self, run: Any) -> Any:
        # Async counterpart of _get_response.
        # NOTE(review): currently unused — ainvoke calls the sync _get_response.
        # TODO: Pagination
        if run.status == "completed":
            import openai

            major_version = int(openai.version.VERSION.split(".")[0])
            minor_version = int(openai.version.VERSION.split(".")[1])
            version_gte_1_14 = (major_version > 1) or (
                major_version == 1 and minor_version >= 14
            )

            messages = await self.async_client.beta.threads.messages.list(
                run.thread_id, order="asc"
            )
            new_messages = [msg for msg in messages if msg.run_id == run.id]
            if not self.as_agent:
                return new_messages
            answer: Any = [
                msg_content for msg in new_messages for msg_content in msg.content
            ]
            if all(
                (
                    isinstance(content, openai.types.beta.threads.TextContentBlock)
                    if version_gte_1_14
                    else isinstance(
                        content, openai.types.beta.threads.MessageContentText
                    )
                )
                for content in answer
            ):
                answer = "\n".join(content.text.value for content in answer)
            return OpenAIAssistantFinish(
                return_values={
                    "output": answer,
                    "thread_id": run.thread_id,
                    "run_id": run.id,
                },
                log="",
                run_id=run.id,
                thread_id=run.thread_id,
            )
        elif run.status == "requires_action":
            if not self.as_agent:
                return run.required_action.submit_tool_outputs.tool_calls
            actions = []
            for tool_call in run.required_action.submit_tool_outputs.tool_calls:
                function = tool_call.function
                try:
                    args = json.loads(function.arguments, strict=False)
                except JSONDecodeError as e:
                    raise ValueError(
                        f"Received invalid JSON function arguments: "
                        f"{function.arguments} for function {function.name}"
                    ) from e
                if len(args) == 1 and "__arg1" in args:
                    args = args["__arg1"]
                actions.append(
                    OpenAIAssistantAction(
                        tool=function.name,
                        tool_input=args,
                        tool_call_id=tool_call.id,
                        log="",
                        run_id=run.id,
                        thread_id=run.thread_id,
                    )
                )
            return actions
        else:
            run_info = json.dumps(run.dict(), indent=2)
            raise ValueError(
                f"Unexpected run status: {run.status}. Full run info:\n\n{run_info})"
            )

    async def _await_for_run(self, run_id: str, thread_id: str) -> Any:
        # Async poll loop; yields to the event loop between checks.
        in_progress = True
        while in_progress:
            run = await self.async_client.beta.threads.runs.retrieve(
                run_id, thread_id=thread_id
            )
            in_progress = run.status in ("in_progress", "queued")
            if in_progress:
                await asyncio.sleep(self.check_every_ms / 1000)
        return run
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/openai_assistant/__init__.py
"""Public exports for the OpenAI Assistant agent package."""

from langchain.agents.openai_assistant.base import OpenAIAssistantRunnable

__all__ = ["OpenAIAssistantRunnable"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/chat/base.py
from typing import Any, List, Optional, Sequence, Tuple

from langchain_core._api import deprecated
from langchain_core.agents import AgentAction
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_core.tools import BaseTool
from pydantic import Field

from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents.chat.output_parser import ChatOutputParser
from langchain.agents.chat.prompt import (
    FORMAT_INSTRUCTIONS,
    HUMAN_MESSAGE,
    SYSTEM_MESSAGE_PREFIX,
    SYSTEM_MESSAGE_SUFFIX,
)
from langchain.agents.utils import validate_tools_single_input
from langchain.chains.llm import LLMChain


@deprecated(
    "0.1.0",
    message=AGENT_DEPRECATION_WARNING,
    removal="1.0",
)
class ChatAgent(Agent):
    """Chat Agent."""

    output_parser: AgentOutputParser = Field(default_factory=ChatOutputParser)
    """Output parser for the agent."""

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought:"

    def _construct_scratchpad(
        self, intermediate_steps: List[Tuple[AgentAction, str]]
    ) -> str:
        """Build the agent scratchpad, framing prior steps as unseen work.

        Raises:
            ValueError: If the base-class scratchpad is not a string.
        """
        agent_scratchpad = super()._construct_scratchpad(intermediate_steps)
        if not isinstance(agent_scratchpad, str):
            raise ValueError("agent_scratchpad should be of type string.")
        if agent_scratchpad:
            return (
                f"This was your previous work "
                f"(but I haven't seen any of it! I only see what "
                f"you return as final answer):\n{agent_scratchpad}"
            )
        else:
            return agent_scratchpad

    @classmethod
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        # Default parser that reads the JSON-blob action format.
        return ChatOutputParser()

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        # This agent only supports single-string-input tools.
        super()._validate_tools(tools)
        validate_tools_single_input(class_name=cls.__name__, tools=tools)

    @property
    def _stop(self) -> List[str]:
        # Stop generation before the model fabricates an observation.
        return ["Observation:"]

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        system_message_prefix: str = SYSTEM_MESSAGE_PREFIX,
        system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX,
        human_message: str = HUMAN_MESSAGE,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
    ) -> BasePromptTemplate:
        """Create a prompt from a list of tools.

        Args:
            tools: A list of tools.
            system_message_prefix: The system message prefix.
                Default is SYSTEM_MESSAGE_PREFIX.
            system_message_suffix: The system message suffix.
                Default is SYSTEM_MESSAGE_SUFFIX.
            human_message: The human message. Default is HUMAN_MESSAGE.
            format_instructions: The format instructions.
                Default is FORMAT_INSTRUCTIONS.
            input_variables: The input variables. Default is None.

        Returns:
            A prompt template.
        """
        tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
        tool_names = ", ".join([tool.name for tool in tools])
        format_instructions = format_instructions.format(tool_names=tool_names)
        template = "\n\n".join(
            [
                system_message_prefix,
                tool_strings,
                format_instructions,
                system_message_suffix,
            ]
        )
        messages = [
            SystemMessagePromptTemplate.from_template(template),
            HumanMessagePromptTemplate.from_template(human_message),
        ]
        if input_variables is None:
            input_variables = ["input", "agent_scratchpad"]
        return ChatPromptTemplate(input_variables=input_variables, messages=messages)  # type: ignore[arg-type]

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        output_parser: Optional[AgentOutputParser] = None,
        system_message_prefix: str = SYSTEM_MESSAGE_PREFIX,
        system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX,
        human_message: str = HUMAN_MESSAGE,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools.

        Args:
            llm: The language model.
            tools: A list of tools.
            callback_manager: The callback manager. Default is None.
            output_parser: The output parser. Default is None.
            system_message_prefix: The system message prefix.
                Default is SYSTEM_MESSAGE_PREFIX.
            system_message_suffix: The system message suffix.
                Default is SYSTEM_MESSAGE_SUFFIX.
            human_message: The human message. Default is HUMAN_MESSAGE.
            format_instructions: The format instructions.
                Default is FORMAT_INSTRUCTIONS.
            input_variables: The input variables. Default is None.
            kwargs: Additional keyword arguments.

        Returns:
            An agent.
        """
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools,
            system_message_prefix=system_message_prefix,
            system_message_suffix=system_message_suffix,
            human_message=human_message,
            format_instructions=format_instructions,
            input_variables=input_variables,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        _output_parser = output_parser or cls._get_default_output_parser()
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=_output_parser,
            **kwargs,
        )

    @property
    def _agent_type(self) -> str:
        # This agent is not serializable by type name.
        raise ValueError
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/chat/output_parser.py
import json
import re
from typing import Pattern, Union

from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException

from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS

FINAL_ANSWER_ACTION = "Final Answer:"


class ChatOutputParser(AgentOutputParser):
    """Parse chat-agent LLM output into an AgentAction or AgentFinish."""

    format_instructions: str = FORMAT_INSTRUCTIONS
    """Default formatting instructions"""

    pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
    """Regex pattern to parse the output."""

    def get_format_instructions(self) -> str:
        """Returns formatting instructions for the given output parser."""
        return self.format_instructions

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Parse the output from the agent into an AgentAction or AgentFinish object.

        The text is scanned for a fenced ```json blob describing a tool action.
        If no parseable action is found but the text contains "Final Answer:",
        everything after that marker becomes the final answer. Note that when
        the text contains BOTH a parseable action and a final answer, the
        internally raised OutputParserException is caught by the enclosing
        handler and the final answer wins.

        Args:
            text: The text to parse.

        Returns:
            An AgentAction or AgentFinish object.

        Raises:
            OutputParserException: If the output could not be parsed.
        """
        has_final_answer = FINAL_ANSWER_ACTION in text
        try:
            fenced = self.pattern.search(text)
            if fenced is None:
                # No code fence at all -> bail out to the except path.
                raise ValueError("action not found")
            blob = json.loads(fenced.group(1).strip())
            if has_final_answer and "action" in blob:
                raise OutputParserException(
                    "Parsing LLM output produced a final answer "
                    f"and a parse-able action: {text}"
                )
            return AgentAction(blob["action"], blob.get("action_input", {}), text)
        except Exception as exc:
            if has_final_answer:
                final = text.split(FINAL_ANSWER_ACTION)[-1].strip()
                return AgentFinish({"output": final}, text)
            raise OutputParserException(f"Could not parse LLM output: {text}") from exc

    @property
    def _type(self) -> str:
        return "chat"
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/chat/prompt.py
# flake8: noqa SYSTEM_MESSAGE_PREFIX = """Answer the following questions as best you can. You have access to the following tools:""" FORMAT_INSTRUCTIONS = """The way you use the tools is by specifying a json blob. Specifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here). The only values that should be in the "action" field are: {tool_names} The $JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. Here is an example of a valid $JSON_BLOB: ``` {{{{ "action": $TOOL_NAME, "action_input": $INPUT }}}} ``` ALWAYS use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: ``` $JSON_BLOB ``` Observation: the result of the action ... (this Thought/Action/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question""" SYSTEM_MESSAGE_SUFFIX = """Begin! Reminder to always use the exact characters `Final Answer` when responding.""" HUMAN_MESSAGE = "{input}\n\n{agent_scratchpad}"
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/format_scratchpad/xml.py
from typing import List, Tuple

from langchain_core.agents import AgentAction


def format_xml(
    intermediate_steps: List[Tuple[AgentAction, str]],
) -> str:
    """Serialize intermediate agent steps as a flat XML-like string.

    Each (action, observation) pair becomes
    ``<tool>…</tool><tool_input>…</tool_input><observation>…</observation>``,
    concatenated in order with no separators.

    Args:
        intermediate_steps: The intermediate steps.

    Returns:
        The intermediate steps as XML.
    """
    segments = []
    for step, result in intermediate_steps:
        segments.append(
            f"<tool>{step.tool}</tool><tool_input>{step.tool_input}"
            f"</tool_input><observation>{result}</observation>"
        )
    return "".join(segments)
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/format_scratchpad/log.py
from typing import List, Tuple

from langchain_core.agents import AgentAction


def format_log_to_str(
    intermediate_steps: List[Tuple[AgentAction, str]],
    observation_prefix: str = "Observation: ",
    llm_prefix: str = "Thought: ",
) -> str:
    """Construct the scratchpad that lets the agent continue its thought process.

    Args:
        intermediate_steps: List of tuples of AgentAction and observation strings.
        observation_prefix: Prefix to append the observation with.
            Defaults to "Observation: ".
        llm_prefix: Prefix to append the llm call with. Defaults to "Thought: ".

    Returns:
        str: The scratchpad.
    """
    # Each step contributes its raw log followed by the prefixed observation
    # and the prompt for the next thought.
    return "".join(
        f"{step.log}\n{observation_prefix}{seen}\n{llm_prefix}"
        for step, seen in intermediate_steps
    )
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/format_scratchpad/openai_functions.py
import json
from typing import List, Sequence, Tuple

from langchain_core.agents import AgentAction, AgentActionMessageLog
from langchain_core.messages import AIMessage, BaseMessage, FunctionMessage


def _convert_agent_action_to_messages(
    agent_action: AgentAction, observation: str
) -> List[BaseMessage]:
    """Reconstruct the chat messages behind a single agent action.

    Args:
        agent_action: Agent action to convert.
        observation: Result of the corresponding tool invocation.

    Returns:
        The original message log plus a FunctionMessage for the tool result,
        or a single AIMessage built from the raw log when no message log
        is available.
    """
    if not isinstance(agent_action, AgentActionMessageLog):
        # No structured message log; fall back to the raw log text.
        return [AIMessage(content=agent_action.log)]
    return [
        *agent_action.message_log,
        _create_function_message(agent_action, observation),
    ]


def _create_function_message(
    agent_action: AgentAction, observation: str
) -> FunctionMessage:
    """Wrap a tool observation in a FunctionMessage.

    Args:
        agent_action: The tool invocation request from the agent.
        observation: The result of the tool invocation.

    Returns:
        FunctionMessage that corresponds to the original tool invocation.
    """
    if isinstance(observation, str):
        content = observation
    else:
        # Non-string observations are serialized to JSON when possible,
        # otherwise stringified as a last resort.
        try:
            content = json.dumps(observation, ensure_ascii=False)
        except Exception:
            content = str(observation)
    return FunctionMessage(
        name=agent_action.tool,
        content=content,
    )


def format_to_openai_function_messages(
    intermediate_steps: Sequence[Tuple[AgentAction, str]],
) -> List[BaseMessage]:
    """Convert (AgentAction, tool output) tuples into FunctionMessages.

    Args:
        intermediate_steps: Steps the LLM has taken so far, with observations.

    Returns:
        List of messages to send to the LLM for the next prediction.
    """
    result: List[BaseMessage] = []
    for action, outcome in intermediate_steps:
        result += _convert_agent_action_to_messages(action, outcome)
    return result


# Backwards compatibility
format_to_openai_functions = format_to_openai_function_messages
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/format_scratchpad/openai_tools.py
"""Backwards-compatible alias module for the tools-based scratchpad formatter."""

from langchain.agents.format_scratchpad.tools import format_to_tool_messages

# Historical OpenAI-specific name kept so existing imports keep working.
format_to_openai_tool_messages = format_to_tool_messages

__all__ = ["format_to_openai_tool_messages"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/format_scratchpad/tools.py
import json
from typing import List, Sequence, Tuple

from langchain_core.agents import AgentAction
from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    ToolMessage,
)

from langchain.agents.output_parsers.tools import ToolAgentAction


def _create_tool_message(
    agent_action: ToolAgentAction, observation: str
) -> ToolMessage:
    """Wrap a tool observation in a ToolMessage.

    Args:
        agent_action: The tool invocation request from the agent.
        observation: The result of the tool invocation.

    Returns:
        ToolMessage that corresponds to the original tool invocation.
    """
    if isinstance(observation, str):
        content = observation
    else:
        # Serialize non-string observations to JSON when possible;
        # fall back to str() for unserializable objects.
        try:
            content = json.dumps(observation, ensure_ascii=False)
        except Exception:
            content = str(observation)
    return ToolMessage(
        tool_call_id=agent_action.tool_call_id,
        content=content,
        additional_kwargs={"name": agent_action.tool},
    )


def format_to_tool_messages(
    intermediate_steps: Sequence[Tuple[AgentAction, str]],
) -> List[BaseMessage]:
    """Convert (AgentAction, tool output) tuples into ToolMessages.

    Args:
        intermediate_steps: Steps the LLM has taken so far, with observations.

    Returns:
        List of messages to send to the LLM for the next prediction.
    """
    messages: List[BaseMessage] = []
    for action, outcome in intermediate_steps:
        if not isinstance(action, ToolAgentAction):
            messages.append(AIMessage(content=action.log))
            continue
        candidates = [*action.message_log, _create_tool_message(action, outcome)]
        # Filter against the accumulated list BEFORE extending, so duplicates
        # within this batch are kept — matches the original comprehension
        # semantics (one AI message can back several parallel tool calls).
        fresh = [msg for msg in candidates if msg not in messages]
        messages.extend(fresh)
    return messages
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/format_scratchpad/log_to_messages.py
from typing import List, Tuple

from langchain_core.agents import AgentAction
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage


def format_log_to_messages(
    intermediate_steps: List[Tuple[AgentAction, str]],
    template_tool_response: str = "{observation}",
) -> List[BaseMessage]:
    """Construct the scratchpad that lets the agent continue its thought process.

    Args:
        intermediate_steps: List of (AgentAction, observation) tuples.
        template_tool_response: Template the observation is formatted into.
            Must contain an ``{observation}`` placeholder.
            Defaults to "{observation}".

    Returns:
        List[BaseMessage]: The scratchpad as alternating AI/Human messages.
    """
    scratchpad: List[BaseMessage] = []
    for step, result in intermediate_steps:
        # The agent's own output becomes an AI message; the tool result is
        # fed back as if the human reported it.
        scratchpad.append(AIMessage(content=step.log))
        scratchpad.append(
            HumanMessage(content=template_tool_response.format(observation=result))
        )
    return scratchpad
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/format_scratchpad/__init__.py
"""Logic for formatting intermediate steps into an agent scratchpad.

Intermediate steps refers to the list of (AgentAction, observation) tuples
that result from previous iterations of the agent.

Depending on the prompting strategy you are using, you may want to format these
differently before passing them into the LLM: as a plain string, as chat
messages, or as XML — each formatter below covers one of those strategies.
"""

from langchain.agents.format_scratchpad.log import format_log_to_str
from langchain.agents.format_scratchpad.log_to_messages import format_log_to_messages
from langchain.agents.format_scratchpad.openai_functions import (
    format_to_openai_function_messages,
    format_to_openai_functions,  # deprecated alias of the line above
)
from langchain.agents.format_scratchpad.tools import format_to_tool_messages
from langchain.agents.format_scratchpad.xml import format_xml

__all__ = [
    "format_xml",
    "format_to_openai_function_messages",
    "format_to_openai_functions",
    "format_to_tool_messages",
    "format_log_to_str",
    "format_log_to_messages",
]
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/json_chat/base.py
from typing import List, Sequence, Union

from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.tools.render import ToolsRenderer, render_text_description

from langchain.agents.format_scratchpad import format_log_to_messages
from langchain.agents.json_chat.prompt import TEMPLATE_TOOL_RESPONSE
from langchain.agents.output_parsers import JSONAgentOutputParser


def create_json_chat_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: ChatPromptTemplate,
    stop_sequence: Union[bool, List[str]] = True,
    tools_renderer: ToolsRenderer = render_text_description,
    template_tool_response: str = TEMPLATE_TOOL_RESPONSE,
) -> Runnable:
    """Create an agent that uses JSON to format its logic, built for Chat Models.

    Args:
        llm: LLM to use as the agent.
        tools: Tools this agent has access to.
        prompt: The prompt to use. It must expose the input variables
            ``tools`` (tool descriptions), ``tool_names`` (all tool names) and
            ``agent_scratchpad`` (a MessagesPlaceholder receiving previous
            agent actions and tool outputs as messages).
        stop_sequence: bool or list of str. If True, a stop token of
            "Observation:" is added to avoid hallucination; if False, no stop
            token is added; a list of str is used verbatim as the stop tokens.
            Default is True. Set to False if the LLM does not support stop
            sequences.
        tools_renderer: Controls how the tools are converted into a string
            before being passed to the LLM. Default is
            ``render_text_description``.
        template_tool_response: Template prompt that wraps the tool response
            (observation) so the LLM generates the next action. Must contain
            the variable ``observation``. Default is TEMPLATE_TOOL_RESPONSE.

    Returns:
        A Runnable sequence representing an agent. It takes the same input
        variables as the prompt and outputs an AgentAction or AgentFinish.

    Raises:
        ValueError: If the prompt is missing required variables.
        ValueError: If ``template_tool_response`` is missing the required
            variable 'observation'.
    """
    provided_vars = prompt.input_variables + list(prompt.partial_variables)
    missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
        provided_vars
    )
    if missing_vars:
        raise ValueError(f"Prompt missing required variables: {missing_vars}")
    if "{observation}" not in template_tool_response:
        raise ValueError(
            "Template tool response missing required variable 'observation'"
        )

    # Bake the rendered tool descriptions and names into the prompt up front.
    prompt = prompt.partial(
        tools=tools_renderer(list(tools)),
        tool_names=", ".join(t.name for t in tools),
    )

    if not stop_sequence:
        llm_to_use = llm
    else:
        llm_to_use = llm.bind(
            stop=["\nObservation"] if stop_sequence is True else stop_sequence
        )

    def _build_scratchpad(inputs: dict) -> list:
        # Re-render the running history on every turn using the configured
        # tool-response template.
        return format_log_to_messages(
            inputs["intermediate_steps"],
            template_tool_response=template_tool_response,
        )

    return (
        RunnablePassthrough.assign(agent_scratchpad=_build_scratchpad)
        | prompt
        | llm_to_use
        | JSONAgentOutputParser()
    )
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/json_chat/prompt.py
# flake8: noqa TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE: --------------------- {observation} USER'S INPUT -------------------- Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else - even if you just want to respond to the user. Do NOT respond with anything except a JSON snippet no matter what!"""
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/tool_calling_agent/base.py
from typing import Callable, List, Sequence, Tuple

from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool

from langchain.agents.format_scratchpad.tools import (
    format_to_tool_messages,
)
from langchain.agents.output_parsers.tools import ToolsAgentOutputParser

# Signature for functions that turn (AgentAction, tool output) pairs into
# chat messages for the scratchpad.
MessageFormatter = Callable[[Sequence[Tuple[AgentAction, str]]], List[BaseMessage]]


def create_tool_calling_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: ChatPromptTemplate,
    *,
    message_formatter: MessageFormatter = format_to_tool_messages,
) -> Runnable:
    """Create an agent that uses tools.

    Args:
        llm: LLM to use as the agent. Must implement ``bind_tools``.
        tools: Tools this agent has access to.
        prompt: The prompt to use. It must have an ``agent_scratchpad`` key
            that is a ``MessagesPlaceholder``; intermediate agent actions and
            tool output messages are passed in there.
        message_formatter: Formatter function to convert
            (AgentAction, tool output) tuples into messages for the
            scratchpad. Defaults to ``format_to_tool_messages``.

    Returns:
        A Runnable sequence representing an agent. It takes the same input
        variables as the prompt and outputs an AgentAction or AgentFinish.

    Raises:
        ValueError: If the prompt is missing the required variable, or the
            LLM does not implement ``bind_tools``.
    """
    known_vars = prompt.input_variables + list(prompt.partial_variables)
    missing_vars = {"agent_scratchpad"}.difference(known_vars)
    if missing_vars:
        raise ValueError(f"Prompt missing required variables: {missing_vars}")
    if not hasattr(llm, "bind_tools"):
        raise ValueError(
            "This function requires a .bind_tools method be implemented on the LLM.",
        )

    bound_llm = llm.bind_tools(tools)

    def _build_scratchpad(inputs: dict) -> List[BaseMessage]:
        return message_formatter(inputs["intermediate_steps"])

    return (
        RunnablePassthrough.assign(agent_scratchpad=_build_scratchpad)
        | prompt
        | bound_llm
        | ToolsAgentOutputParser()
    )
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/base.py
# Re-export BaseToolkit from langchain-core so legacy imports of
# langchain.agents.agent_toolkits.base keep working.
from langchain_core.tools import BaseToolkit

__all__ = ["BaseToolkit"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/azure_cognitive_services.py
"""Deprecated import location for ``AzureCognitiveServicesToolkit``.

The implementation now lives in ``langchain_community``; importing it from
here raises a deprecation warning and forwards to the new location.
"""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.azure_cognitive_services import (
        AzureCognitiveServicesToolkit,
    )

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "AzureCognitiveServicesToolkit": (
        "langchain_community.agent_toolkits.azure_cognitive_services"
    )
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "AzureCognitiveServicesToolkit",
]
0
lc_public_repos/langchain/libs/langchain/langchain/agents
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/__init__.py
"""Agent toolkits contain integrations with various resources and services.

LangChain has a large ecosystem of integrations with various external resources
like local and remote file systems, APIs and databases.

These integrations allow developers to create versatile applications that
combine the power of LLMs with the ability to access, interact with and
manipulate external resources.

When developing an application, developers should inspect the capabilities and
permissions of the tools that underlie the given agent toolkit, and determine
whether permissions of the given toolkit are appropriate for the application.

See [Security](https://python.langchain.com/docs/security) for more information.
"""

from pathlib import Path
from typing import TYPE_CHECKING, Any

from langchain_core._api.path import as_import_path
from langchain_core.tools.retriever import create_retriever_tool

from langchain._api import create_importer
from langchain.agents.agent_toolkits.conversational_retrieval.openai_functions import (
    create_conversational_retrieval_agent,
)
from langchain.agents.agent_toolkits.vectorstore.base import (
    create_vectorstore_agent,
    create_vectorstore_router_agent,
)
from langchain.agents.agent_toolkits.vectorstore.toolkit import (
    VectorStoreInfo,
    VectorStoreRouterToolkit,
    VectorStoreToolkit,
)

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
    from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit
    from langchain_community.agent_toolkits.azure_cognitive_services import (
        AzureCognitiveServicesToolkit,
    )
    from langchain_community.agent_toolkits.file_management.toolkit import (
        FileManagementToolkit,
    )
    from langchain_community.agent_toolkits.gmail.toolkit import GmailToolkit
    from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit
    from langchain_community.agent_toolkits.json.base import create_json_agent
    from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
    from langchain_community.agent_toolkits.multion.toolkit import MultionToolkit
    from langchain_community.agent_toolkits.nasa.toolkit import NasaToolkit
    from langchain_community.agent_toolkits.nla.toolkit import NLAToolkit
    from langchain_community.agent_toolkits.office365.toolkit import O365Toolkit
    from langchain_community.agent_toolkits.openapi.base import create_openapi_agent
    from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit
    from langchain_community.agent_toolkits.playwright.toolkit import (
        PlayWrightBrowserToolkit,
    )
    from langchain_community.agent_toolkits.powerbi.base import create_pbi_agent
    from langchain_community.agent_toolkits.powerbi.chat_base import (
        create_pbi_chat_agent,
    )
    from langchain_community.agent_toolkits.powerbi.toolkit import PowerBIToolkit
    from langchain_community.agent_toolkits.slack.toolkit import SlackToolkit
    from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent
    from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
    from langchain_community.agent_toolkits.sql.base import create_sql_agent
    from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
    from langchain_community.agent_toolkits.steam.toolkit import SteamToolkit
    from langchain_community.agent_toolkits.zapier.toolkit import ZapierToolkit

# Agents that were moved to langchain_experimental; importing them from here
# raises an ImportError pointing at the new location.
DEPRECATED_AGENTS = [
    "create_csv_agent",
    "create_pandas_dataframe_agent",
    "create_xorbits_agent",
    "create_python_agent",
    "create_spark_dataframe_agent",
]

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "AINetworkToolkit": "langchain_community.agent_toolkits.ainetwork.toolkit",
    "AmadeusToolkit": "langchain_community.agent_toolkits.amadeus.toolkit",
    "AzureCognitiveServicesToolkit": (
        "langchain_community.agent_toolkits.azure_cognitive_services"
    ),
    "FileManagementToolkit": (
        "langchain_community.agent_toolkits.file_management.toolkit"
    ),
    "GmailToolkit": "langchain_community.agent_toolkits.gmail.toolkit",
    "JiraToolkit": "langchain_community.agent_toolkits.jira.toolkit",
    "JsonToolkit": "langchain_community.agent_toolkits.json.toolkit",
    "MultionToolkit": "langchain_community.agent_toolkits.multion.toolkit",
    "NasaToolkit": "langchain_community.agent_toolkits.nasa.toolkit",
    "NLAToolkit": "langchain_community.agent_toolkits.nla.toolkit",
    "O365Toolkit": "langchain_community.agent_toolkits.office365.toolkit",
    "OpenAPIToolkit": "langchain_community.agent_toolkits.openapi.toolkit",
    "PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit",
    "PowerBIToolkit": "langchain_community.agent_toolkits.powerbi.toolkit",
    "SlackToolkit": "langchain_community.agent_toolkits.slack.toolkit",
    "SteamToolkit": "langchain_community.agent_toolkits.steam.toolkit",
    "SQLDatabaseToolkit": "langchain_community.agent_toolkits.sql.toolkit",
    "SparkSQLToolkit": "langchain_community.agent_toolkits.spark_sql.toolkit",
    "ZapierToolkit": "langchain_community.agent_toolkits.zapier.toolkit",
    "create_json_agent": "langchain_community.agent_toolkits.json.base",
    "create_openapi_agent": "langchain_community.agent_toolkits.openapi.base",
    "create_pbi_agent": "langchain_community.agent_toolkits.powerbi.base",
    "create_pbi_chat_agent": "langchain_community.agent_toolkits.powerbi.chat_base",
    "create_spark_sql_agent": "langchain_community.agent_toolkits.spark_sql.base",
    "create_sql_agent": "langchain_community.agent_toolkits.sql.base",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Dynamically resolve deprecated attribute accesses.

    Names in DEPRECATED_AGENTS raise immediately with a pointer to
    langchain_experimental; everything else is delegated to the
    create_importer machinery.
    """
    if name in DEPRECATED_AGENTS:
        relative_path = as_import_path(Path(__file__).parent, suffix=name)
        old_path = "langchain." + relative_path
        new_path = "langchain_experimental." + relative_path
        raise ImportError(
            f"{name} has been moved to langchain experimental. "
            "See https://github.com/langchain-ai/langchain/discussions/11680 "
            "for more information.\n"
            f"Please update your import statement from: `{old_path}` to `{new_path}`."
        )
    return _import_attribute(name)


__all__ = [
    "AINetworkToolkit",
    "AmadeusToolkit",
    "AzureCognitiveServicesToolkit",
    "FileManagementToolkit",
    "GmailToolkit",
    "JiraToolkit",
    "JsonToolkit",
    "MultionToolkit",
    "NasaToolkit",
    "NLAToolkit",
    "O365Toolkit",
    "OpenAPIToolkit",
    "PlayWrightBrowserToolkit",
    "PowerBIToolkit",
    "SlackToolkit",
    "SteamToolkit",
    "SQLDatabaseToolkit",
    "SparkSQLToolkit",
    "VectorStoreInfo",
    "VectorStoreRouterToolkit",
    "VectorStoreToolkit",
    "ZapierToolkit",
    "create_json_agent",
    "create_openapi_agent",
    "create_pbi_agent",
    "create_pbi_chat_agent",
    "create_spark_sql_agent",
    "create_sql_agent",
    "create_vectorstore_agent",
    "create_vectorstore_router_agent",
    "create_conversational_retrieval_agent",
    "create_retriever_tool",
]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/ainetwork/toolkit.py
"""Deprecated import location for ``AINetworkToolkit``.

The implementation now lives in ``langchain_community``; importing it from
here raises a deprecation warning and forwards to the new location.
"""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "AINetworkToolkit": "langchain_community.agent_toolkits.ainetwork.toolkit"
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "AINetworkToolkit",
]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/ainetwork/__init__.py
"""AINetwork toolkit.

Deprecated import package; the toolkit implementation now lives in
``langchain_community``.
"""
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/powerbi/base.py
"""Deprecated import location for ``create_pbi_agent``.

The implementation now lives in ``langchain_community``; importing it from
here raises a deprecation warning and forwards to the new location.
"""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.powerbi.base import create_pbi_agent

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "create_pbi_agent": "langchain_community.agent_toolkits.powerbi.base"
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "create_pbi_agent",
]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/powerbi/chat_base.py
"""Deprecated import location for ``create_pbi_chat_agent``.

The implementation now lives in ``langchain_community``; importing it from
here raises a deprecation warning and forwards to the new location.
"""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.powerbi.chat_base import (
        create_pbi_chat_agent,
    )

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "create_pbi_chat_agent": "langchain_community.agent_toolkits.powerbi.chat_base"
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "create_pbi_chat_agent",
]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/powerbi/toolkit.py
"""Deprecated import location for ``PowerBIToolkit``.

The implementation now lives in ``langchain_community``; importing it from
here raises a deprecation warning and forwards to the new location.
"""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.powerbi.toolkit import PowerBIToolkit

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "PowerBIToolkit": "langchain_community.agent_toolkits.powerbi.toolkit"
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "PowerBIToolkit",
]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/powerbi/__init__.py
"""Power BI agent.

Deprecated import package; the agent and toolkit implementations now live in
``langchain_community``.
"""
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/powerbi/prompt.py
"""Deprecated import location for the Power BI prompt constants.

The constants now live in ``langchain_community``; importing them from here
raises a deprecation warning and forwards to the new location.
"""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.powerbi.prompt import (
        POWERBI_CHAT_PREFIX,
        POWERBI_CHAT_SUFFIX,
        POWERBI_PREFIX,
        POWERBI_SUFFIX,
    )

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "POWERBI_CHAT_PREFIX": "langchain_community.agent_toolkits.powerbi.prompt",
    "POWERBI_CHAT_SUFFIX": "langchain_community.agent_toolkits.powerbi.prompt",
    "POWERBI_PREFIX": "langchain_community.agent_toolkits.powerbi.prompt",
    "POWERBI_SUFFIX": "langchain_community.agent_toolkits.powerbi.prompt",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "POWERBI_PREFIX",
    "POWERBI_SUFFIX",
    "POWERBI_CHAT_PREFIX",
    "POWERBI_CHAT_SUFFIX",
]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/github/toolkit.py
"""Deprecated import location for the GitHub toolkit and its tool schemas.

The implementations now live in ``langchain_community``; importing them from
here raises a deprecation warning and forwards to the new location.
"""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.github.toolkit import (
        BranchName,
        CommentOnIssue,
        CreateFile,
        CreatePR,
        CreateReviewRequest,
        DeleteFile,
        DirectoryPath,
        GetIssue,
        GetPR,
        GitHubToolkit,
        NoInput,
        ReadFile,
        SearchCode,
        SearchIssuesAndPRs,
        UpdateFile,
    )

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "NoInput": "langchain_community.agent_toolkits.github.toolkit",
    "GetIssue": "langchain_community.agent_toolkits.github.toolkit",
    "CommentOnIssue": "langchain_community.agent_toolkits.github.toolkit",
    "GetPR": "langchain_community.agent_toolkits.github.toolkit",
    "CreatePR": "langchain_community.agent_toolkits.github.toolkit",
    "CreateFile": "langchain_community.agent_toolkits.github.toolkit",
    "ReadFile": "langchain_community.agent_toolkits.github.toolkit",
    "UpdateFile": "langchain_community.agent_toolkits.github.toolkit",
    "DeleteFile": "langchain_community.agent_toolkits.github.toolkit",
    "DirectoryPath": "langchain_community.agent_toolkits.github.toolkit",
    "BranchName": "langchain_community.agent_toolkits.github.toolkit",
    "SearchCode": "langchain_community.agent_toolkits.github.toolkit",
    "CreateReviewRequest": "langchain_community.agent_toolkits.github.toolkit",
    "SearchIssuesAndPRs": "langchain_community.agent_toolkits.github.toolkit",
    "GitHubToolkit": "langchain_community.agent_toolkits.github.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "NoInput",
    "GetIssue",
    "CommentOnIssue",
    "GetPR",
    "CreatePR",
    "CreateFile",
    "ReadFile",
    "UpdateFile",
    "DeleteFile",
    "DirectoryPath",
    "BranchName",
    "SearchCode",
    "CreateReviewRequest",
    "SearchIssuesAndPRs",
    "GitHubToolkit",
]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/github/__init__.py
"""GitHub Toolkit.

Deprecated import package; the toolkit implementation now lives in
``langchain_community``.
"""
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/gitlab/toolkit.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit

# Deprecated re-export: the toolkit now lives in ``langchain_community``.
# ``create_importer`` warns on access and forwards the lookup there.
DEPRECATED_LOOKUP = {
    "GitLabToolkit": "langchain_community.agent_toolkits.gitlab.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["GitLabToolkit"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/gitlab/__init__.py
"""GitLab Toolkit."""
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/gmail/toolkit.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.gmail.toolkit import GmailToolkit

# Deprecated re-export: the toolkit now lives in ``langchain_community``.
# ``create_importer`` warns on access and forwards the lookup there.
DEPRECATED_LOOKUP = {
    "GmailToolkit": "langchain_community.agent_toolkits.gmail.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["GmailToolkit"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/gmail/__init__.py
"""Gmail toolkit."""
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/multion/toolkit.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.multion.toolkit import MultionToolkit

# Deprecated re-export: the toolkit now lives in ``langchain_community``.
# ``create_importer`` warns on access and forwards the lookup there.
DEPRECATED_LOOKUP = {
    "MultionToolkit": "langchain_community.agent_toolkits.multion.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["MultionToolkit"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/multion/__init__.py
"""MultiOn Toolkit."""
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/sql/base.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.sql.base import create_sql_agent

# Deprecated re-export: the factory now lives in ``langchain_community``.
# ``create_importer`` warns on access and forwards the lookup there.
DEPRECATED_LOOKUP = {
    "create_sql_agent": "langchain_community.agent_toolkits.sql.base",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["create_sql_agent"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/sql/toolkit.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit

# Deprecated re-export: the toolkit now lives in ``langchain_community``.
# ``create_importer`` warns on access and forwards the lookup there.
DEPRECATED_LOOKUP = {
    "SQLDatabaseToolkit": "langchain_community.agent_toolkits.sql.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["SQLDatabaseToolkit"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/sql/__init__.py
"""SQL agent."""
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/sql/prompt.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.sql.prompt import (
        SQL_FUNCTIONS_SUFFIX,
        SQL_PREFIX,
        SQL_SUFFIX,
    )

# All three prompt constants moved to the same community module;
# ``create_importer`` warns on access and forwards the lookup there.
_PROMPT_MODULE = "langchain_community.agent_toolkits.sql.prompt"

DEPRECATED_LOOKUP = {
    name: _PROMPT_MODULE
    for name in ("SQL_PREFIX", "SQL_SUFFIX", "SQL_FUNCTIONS_SUFFIX")
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["SQL_PREFIX", "SQL_SUFFIX", "SQL_FUNCTIONS_SUFFIX"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/slack/toolkit.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.slack.toolkit import SlackToolkit

# Deprecated re-export: the toolkit now lives in ``langchain_community``.
# ``create_importer`` warns on access and forwards the lookup there.
DEPRECATED_LOOKUP = {
    "SlackToolkit": "langchain_community.agent_toolkits.slack.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["SlackToolkit"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/slack/__init__.py
"""Slack toolkit."""
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/vectorstore/base.py
"""VectorStore agent.""" from typing import Any, Dict, Optional from langchain_core._api import deprecated from langchain_core.callbacks.base import BaseCallbackManager from langchain_core.language_models import BaseLanguageModel from langchain.agents.agent import AgentExecutor from langchain.agents.agent_toolkits.vectorstore.prompt import PREFIX, ROUTER_PREFIX from langchain.agents.agent_toolkits.vectorstore.toolkit import ( VectorStoreRouterToolkit, VectorStoreToolkit, ) from langchain.agents.mrkl.base import ZeroShotAgent from langchain.chains.llm import LLMChain @deprecated( since="0.2.13", removal="1.0", message=( "This function will continue to be supported, but it is recommended for new " "use cases to be built with LangGraph. LangGraph offers a more flexible and " "full-featured framework for building agents, including support for " "tool-calling, persistence of state, and human-in-the-loop workflows. " "See API reference for this function for a replacement implementation: " "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_agent.html " # noqa: E501 "Read more here on how to create agents that query vector stores: " "https://python.langchain.com/docs/how_to/qa_chat_history_how_to/#agents" ), ) def create_vectorstore_agent( llm: BaseLanguageModel, toolkit: VectorStoreToolkit, callback_manager: Optional[BaseCallbackManager] = None, prefix: str = PREFIX, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> AgentExecutor: """Construct a VectorStore agent from an LLM and tools. Note: this class is deprecated. See below for a replacement that uses tool calling methods and LangGraph. Install LangGraph with: .. code-block:: bash pip install -U langgraph .. 
code-block:: python from langchain_core.tools import create_retriever_tool from langchain_core.vectorstores import InMemoryVectorStore from langchain_openai import ChatOpenAI, OpenAIEmbeddings from langgraph.prebuilt import create_react_agent llm = ChatOpenAI(model="gpt-4o-mini", temperature=0) vector_store = InMemoryVectorStore.from_texts( [ "Dogs are great companions, known for their loyalty and friendliness.", "Cats are independent pets that often enjoy their own space.", ], OpenAIEmbeddings(), ) tool = create_retriever_tool( vector_store.as_retriever(), "pet_information_retriever", "Fetches information about pets.", ) agent = create_react_agent(llm, [tool]) for step in agent.stream( {"messages": [("human", "What are dogs known for?")]}, stream_mode="values", ): step["messages"][-1].pretty_print() Args: llm (BaseLanguageModel): LLM that will be used by the agent toolkit (VectorStoreToolkit): Set of tools for the agent callback_manager (Optional[BaseCallbackManager], optional): Object to handle the callback [ Defaults to None. ] prefix (str, optional): The prefix prompt for the agent. If not provided uses default PREFIX. verbose (bool, optional): If you want to see the content of the scratchpad. [ Defaults to False ] agent_executor_kwargs (Optional[Dict[str, Any]], optional): If there is any other parameter you want to send to the agent. [ Defaults to None ] kwargs: Additional named parameters to pass to the ZeroShotAgent. Returns: AgentExecutor: Returns a callable AgentExecutor object. 
Either you can call it or use run method with the query to get the response """ # noqa: E501 tools = toolkit.get_tools() prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, **(agent_executor_kwargs or {}), ) @deprecated( since="0.2.13", removal="1.0", message=( "This function will continue to be supported, but it is recommended for new " "use cases to be built with LangGraph. LangGraph offers a more flexible and " "full-featured framework for building agents, including support for " "tool-calling, persistence of state, and human-in-the-loop workflows. " "See API reference for this function for a replacement implementation: " "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_router_agent.html " # noqa: E501 "Read more here on how to create agents that query vector stores: " "https://python.langchain.com/docs/how_to/qa_chat_history_how_to/#agents" ), ) def create_vectorstore_router_agent( llm: BaseLanguageModel, toolkit: VectorStoreRouterToolkit, callback_manager: Optional[BaseCallbackManager] = None, prefix: str = ROUTER_PREFIX, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> AgentExecutor: """Construct a VectorStore router agent from an LLM and tools. Note: this class is deprecated. See below for a replacement that uses tool calling methods and LangGraph. Install LangGraph with: .. code-block:: bash pip install -U langgraph .. 
code-block:: python from langchain_core.tools import create_retriever_tool from langchain_core.vectorstores import InMemoryVectorStore from langchain_openai import ChatOpenAI, OpenAIEmbeddings from langgraph.prebuilt import create_react_agent llm = ChatOpenAI(model="gpt-4o-mini", temperature=0) pet_vector_store = InMemoryVectorStore.from_texts( [ "Dogs are great companions, known for their loyalty and friendliness.", "Cats are independent pets that often enjoy their own space.", ], OpenAIEmbeddings(), ) food_vector_store = InMemoryVectorStore.from_texts( [ "Carrots are orange and delicious.", "Apples are red and delicious.", ], OpenAIEmbeddings(), ) tools = [ create_retriever_tool( pet_vector_store.as_retriever(), "pet_information_retriever", "Fetches information about pets.", ), create_retriever_tool( food_vector_store.as_retriever(), "food_information_retriever", "Fetches information about food.", ) ] agent = create_react_agent(llm, tools) for step in agent.stream( {"messages": [("human", "Tell me about carrots.")]}, stream_mode="values", ): step["messages"][-1].pretty_print() Args: llm (BaseLanguageModel): LLM that will be used by the agent toolkit (VectorStoreRouterToolkit): Set of tools for the agent which have routing capability with multiple vector stores callback_manager (Optional[BaseCallbackManager], optional): Object to handle the callback [ Defaults to None. ] prefix (str, optional): The prefix prompt for the router agent. If not provided uses default ROUTER_PREFIX. verbose (bool, optional): If you want to see the content of the scratchpad. [ Defaults to False ] agent_executor_kwargs (Optional[Dict[str, Any]], optional): If there is any other parameter you want to send to the agent. [ Defaults to None ] kwargs: Additional named parameters to pass to the ZeroShotAgent. Returns: AgentExecutor: Returns a callable AgentExecutor object. Either you can call it or use run method with the query to get the response. 
""" # noqa: E501 tools = toolkit.get_tools() prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, **(agent_executor_kwargs or {}), )
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py
"""Toolkit for interacting with a vector store.""" from typing import List from langchain_core.language_models import BaseLanguageModel from langchain_core.tools import BaseTool from langchain_core.tools.base import BaseToolkit from langchain_core.vectorstores import VectorStore from pydantic import BaseModel, ConfigDict, Field class VectorStoreInfo(BaseModel): """Information about a VectorStore.""" vectorstore: VectorStore = Field(exclude=True) name: str description: str model_config = ConfigDict( arbitrary_types_allowed=True, ) class VectorStoreToolkit(BaseToolkit): """Toolkit for interacting with a Vector Store.""" vectorstore_info: VectorStoreInfo = Field(exclude=True) llm: BaseLanguageModel model_config = ConfigDict( arbitrary_types_allowed=True, ) def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" try: from langchain_community.tools.vectorstore.tool import ( VectorStoreQATool, VectorStoreQAWithSourcesTool, ) except ImportError: raise ImportError( "You need to install langchain-community to use this toolkit." 
) description = VectorStoreQATool.get_description( self.vectorstore_info.name, self.vectorstore_info.description ) qa_tool = VectorStoreQATool( name=self.vectorstore_info.name, description=description, vectorstore=self.vectorstore_info.vectorstore, llm=self.llm, ) description = VectorStoreQAWithSourcesTool.get_description( self.vectorstore_info.name, self.vectorstore_info.description ) qa_with_sources_tool = VectorStoreQAWithSourcesTool( name=f"{self.vectorstore_info.name}_with_sources", description=description, vectorstore=self.vectorstore_info.vectorstore, llm=self.llm, ) return [qa_tool, qa_with_sources_tool] class VectorStoreRouterToolkit(BaseToolkit): """Toolkit for routing between Vector Stores.""" vectorstores: List[VectorStoreInfo] = Field(exclude=True) llm: BaseLanguageModel model_config = ConfigDict( arbitrary_types_allowed=True, ) def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" tools: List[BaseTool] = [] try: from langchain_community.tools.vectorstore.tool import ( VectorStoreQATool, ) except ImportError: raise ImportError( "You need to install langchain-community to use this toolkit." ) for vectorstore_info in self.vectorstores: description = VectorStoreQATool.get_description( vectorstore_info.name, vectorstore_info.description ) qa_tool = VectorStoreQATool( name=vectorstore_info.name, description=description, vectorstore=vectorstore_info.vectorstore, llm=self.llm, ) tools.append(qa_tool) return tools
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/vectorstore/__init__.py
"""Agent toolkit for interacting with vector stores."""
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/vectorstore/prompt.py
# flake8: noqa PREFIX = """You are an agent designed to answer questions about sets of documents. You have access to tools for interacting with the documents, and the inputs to the tools are questions. Sometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool to do so. If the question does not seem relevant to any of the tools provided, just return "I don't know" as the answer. """ ROUTER_PREFIX = """You are an agent designed to answer questions. You have access to tools for interacting with different sources, and the inputs to the tools are questions. Your main task is to decide which of the tools is relevant for answering question at hand. For complex questions, you can break the question down into sub questions and use tools to answers the sub questions. """
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/xorbits/__init__.py
from pathlib import Path
from typing import Any

from langchain_core._api.path import as_import_path


def __getattr__(name: str) -> Any:
    """Raise an informative error for the agent moved to langchain_experimental.

    ``create_xorbits_agent`` relies on a Python REPL tool and must be
    sandboxed, so it was relocated out of the core package; accessing it
    here raises ImportError with migration instructions.
    """
    if name == "create_xorbits_agent":
        # Get directory of langchain package (three levels up from this file).
        HERE = Path(__file__).parents[3]
        here = as_import_path(Path(__file__).parent, relative_to=HERE)

        old_path = "langchain." + here + "." + name
        new_path = "langchain_experimental." + here + "." + name
        raise ImportError(
            "This agent has been moved to langchain experiment. "
            "This agent relies on python REPL tool under the hood, so to use it "
            "safely please sandbox the python REPL. "
            "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
            # Fixed: sentence separator was missing, producing "...11680To keep".
            "and https://github.com/langchain-ai/langchain/discussions/11680. "
            "To keep using this code as is, install langchain experimental and "
            f"update your import statement from:\n `{old_path}` to `{new_path}`."
        )
    raise AttributeError(f"{name} does not exist")
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/jira/toolkit.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit

# Deprecated re-export: the toolkit now lives in ``langchain_community``.
# ``create_importer`` warns on access and forwards the lookup there.
DEPRECATED_LOOKUP = {
    "JiraToolkit": "langchain_community.agent_toolkits.jira.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["JiraToolkit"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/jira/__init__.py
"""Jira Toolkit."""
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/spark_sql/base.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent

# Deprecated re-export: the factory now lives in ``langchain_community``.
# ``create_importer`` warns on access and forwards the lookup there.
DEPRECATED_LOOKUP = {
    "create_spark_sql_agent": "langchain_community.agent_toolkits.spark_sql.base",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["create_spark_sql_agent"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/spark_sql/toolkit.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit

# Deprecated re-export: the toolkit now lives in ``langchain_community``.
# ``create_importer`` warns on access and forwards the lookup there.
DEPRECATED_LOOKUP = {
    "SparkSQLToolkit": "langchain_community.agent_toolkits.spark_sql.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["SparkSQLToolkit"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/spark_sql/__init__.py
"""Spark SQL agent."""
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/spark_sql/prompt.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.spark_sql.prompt import (
        SQL_PREFIX,
        SQL_SUFFIX,
    )

# Both prompt constants moved to the same community module;
# ``create_importer`` warns on access and forwards the lookup there.
_PROMPT_MODULE = "langchain_community.agent_toolkits.spark_sql.prompt"

DEPRECATED_LOOKUP = {name: _PROMPT_MODULE for name in ("SQL_PREFIX", "SQL_SUFFIX")}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["SQL_PREFIX", "SQL_SUFFIX"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/file_management/toolkit.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.file_management.toolkit import (
        FileManagementToolkit,
    )

# Deprecated re-export: the toolkit now lives in ``langchain_community``.
# ``create_importer`` warns on access and forwards the lookup there.
DEPRECATED_LOOKUP = {
    "FileManagementToolkit": (
        "langchain_community.agent_toolkits.file_management.toolkit"
    ),
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["FileManagementToolkit"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/file_management/__init__.py
"""Local file management toolkit.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.agent_toolkits.file_management.toolkit import ( FileManagementToolkit, ) # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optional imports. DEPRECATED_LOOKUP = { "FileManagementToolkit": ( "langchain_community.agent_toolkits.file_management.toolkit" ) } _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) def __getattr__(name: str) -> Any: """Look up attributes dynamically.""" return _import_attribute(name) __all__ = [ "FileManagementToolkit", ]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/amadeus/toolkit.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit

# Deprecated re-export: the toolkit now lives in ``langchain_community``.
# ``create_importer`` warns on access and forwards the lookup there.
DEPRECATED_LOOKUP = {
    "AmadeusToolkit": "langchain_community.agent_toolkits.amadeus.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = ["AmadeusToolkit"]
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/spark/__init__.py
from pathlib import Path
from typing import Any

from langchain_core._api.path import as_import_path


def __getattr__(name: str) -> Any:
    """Raise an informative error for the agent moved to langchain_experimental.

    ``create_spark_dataframe_agent`` relies on a Python REPL tool and must
    be sandboxed, so it was relocated out of the core package; accessing it
    here raises ImportError with migration instructions.
    """
    if name == "create_spark_dataframe_agent":
        # Get directory of langchain package (three levels up from this file).
        HERE = Path(__file__).parents[3]
        here = as_import_path(Path(__file__).parent, relative_to=HERE)

        old_path = "langchain." + here + "." + name
        new_path = "langchain_experimental." + here + "." + name
        raise ImportError(
            "This agent has been moved to langchain experiment. "
            "This agent relies on python REPL tool under the hood, so to use it "
            "safely please sandbox the python REPL. "
            "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
            # Fixed: sentence separator was missing, producing "...11680To keep".
            "and https://github.com/langchain-ai/langchain/discussions/11680. "
            "To keep using this code as is, install langchain experimental and "
            f"update your import statement from:\n `{old_path}` to `{new_path}`."
        )
    raise AttributeError(f"{name} does not exist")
0
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits
lc_public_repos/langchain/libs/langchain/langchain/agents/agent_toolkits/office365/toolkit.py
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.agent_toolkits.office365.toolkit import O365Toolkit

# Deprecated re-export: the toolkit now lives in ``langchain_community``.
# ``create_importer`` warns on access and forwards the lookup there.
DEPRECATED_LOOKUP = {
    "O365Toolkit": "langchain_community.agent_toolkits.office365.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes through the dynamic importer."""
    return _import_attribute(name)


__all__ = [
    "O365Toolkit",
]