index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos
lc_public_repos/langchainjs/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/environment_tests/docker-compose.yml
version: "3" services: test-exports-esbuild: image: node:18 environment: PUPPETEER_SKIP_DOWNLOAD: "true" PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true" working_dir: /app volumes: - ../yarn.lock:/root/yarn.lock - ../.yarnrc.yml:/root/.yarnrc.yml - ../.yarn:/root/.yarn - ../environment_tests/test-exports-esbuild:/package - ../environment_tests/scripts:/scripts - ../langchain:/langchain - ../langchain-core:/langchain-core - ../libs/langchain-community:/langchain-community - ../libs/langchain-anthropic:/langchain-anthropic - ../libs/langchain-openai:/langchain-openai - ../libs/langchain-cohere:/langchain-cohere command: bash /scripts/docker-ci-entrypoint.sh test-exports-esm: image: node:18 environment: PUPPETEER_SKIP_DOWNLOAD: "true" PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true" working_dir: /app volumes: - ../yarn.lock:/root/yarn.lock - ../.yarnrc.yml:/root/.yarnrc.yml - ../.yarn:/root/.yarn - ../environment_tests/test-exports-esm:/package - ../environment_tests/scripts:/scripts - ../langchain:/langchain - ../langchain-core:/langchain-core - ../libs/langchain-community:/langchain-community - ../libs/langchain-anthropic:/langchain-anthropic - ../libs/langchain-openai:/langchain-openai - ../libs/langchain-cohere:/langchain-cohere command: bash /scripts/docker-ci-entrypoint.sh test-exports-tsc: image: node:18 environment: PUPPETEER_SKIP_DOWNLOAD: "true" PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true" working_dir: /app volumes: - ../yarn.lock:/root/yarn.lock - ../.yarnrc.yml:/root/.yarnrc.yml - ../.yarn:/root/.yarn - ../environment_tests/test-exports-tsc:/package - ../environment_tests/scripts:/scripts - ../langchain:/langchain - ../langchain-core:/langchain-core - ../libs/langchain-community:/langchain-community - ../libs/langchain-anthropic:/langchain-anthropic - ../libs/langchain-openai:/langchain-openai - ../libs/langchain-cohere:/langchain-cohere command: bash /scripts/docker-ci-entrypoint.sh test-exports-cjs: image: node:18 environment: PUPPETEER_SKIP_DOWNLOAD: "true" 
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true" working_dir: /app volumes: - ../yarn.lock:/root/yarn.lock - ../.yarnrc.yml:/root/.yarnrc.yml - ../.yarn:/root/.yarn - ../environment_tests/test-exports-cjs:/package - ../environment_tests/scripts:/scripts - ../langchain:/langchain - ../langchain-core:/langchain-core - ../libs/langchain-community:/langchain-community - ../libs/langchain-anthropic:/langchain-anthropic - ../libs/langchain-openai:/langchain-openai - ../libs/langchain-cohere:/langchain-cohere command: bash /scripts/docker-ci-entrypoint.sh test-exports-cf: image: node:18 environment: PUPPETEER_SKIP_DOWNLOAD: "true" PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true" working_dir: /app volumes: - ../yarn.lock:/root/yarn.lock - ../.yarnrc.yml:/root/.yarnrc.yml - ../.yarn:/root/.yarn - ../environment_tests/test-exports-cf:/package - ../environment_tests/scripts:/scripts - ../langchain:/langchain - ../langchain-core:/langchain-core - ../libs/langchain-community:/langchain-community - ../libs/langchain-anthropic:/langchain-anthropic - ../libs/langchain-openai:/langchain-openai - ../libs/langchain-cohere:/langchain-cohere command: bash /scripts/docker-ci-entrypoint.sh test-exports-vercel: image: node:18 environment: PUPPETEER_SKIP_DOWNLOAD: "true" PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true" working_dir: /app volumes: - ../yarn.lock:/root/yarn.lock - ../.yarnrc.yml:/root/.yarnrc.yml - ../.yarn:/root/.yarn - ../environment_tests/test-exports-vercel:/package - ../environment_tests/scripts:/scripts - ../langchain:/langchain - ../langchain-core:/langchain-core - ../libs/langchain-community:/langchain-community - ../libs/langchain-anthropic:/langchain-anthropic - ../libs/langchain-openai:/langchain-openai - ../libs/langchain-cohere:/langchain-cohere command: bash /scripts/docker-ci-entrypoint.sh test-exports-vite: image: node:18 environment: PUPPETEER_SKIP_DOWNLOAD: "true" PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: "true" working_dir: /app volumes: - ../yarn.lock:/root/yarn.lock - 
../.yarnrc.yml:/root/.yarnrc.yml - ../.yarn:/root/.yarn - ../environment_tests/test-exports-vite:/package - ../environment_tests/scripts:/scripts - ../langchain:/langchain - ../langchain-core:/langchain-core - ../libs/langchain-community:/langchain-community - ../libs/langchain-anthropic:/langchain-anthropic - ../libs/langchain-openai:/langchain-openai - ../libs/langchain-cohere:/langchain-cohere command: bash /scripts/docker-ci-entrypoint.sh # test-exports-bun: # image: oven/bun # working_dir: /app # volumes: # - ../environment_tests/test-exports-bun:/package # - ../environment_tests/scripts:/scripts # - ../langchain:/langchain-workspace # - ../langchain-core:/langchain-core # - ../libs/langchain-community:/langchain-community-workspace # - ../libs/langchain-anthropic:/langchain-anthropic-workspace # command: bash /scripts/docker-bun-ci-entrypoint.sh success: image: alpine:3.14 command: echo "Success" depends_on: test-exports-esbuild: condition: service_completed_successfully test-exports-esm: condition: service_completed_successfully test-exports-tsc: condition: service_completed_successfully test-exports-cjs: condition: service_completed_successfully test-exports-cf: condition: service_completed_successfully test-exports-vercel: condition: service_completed_successfully test-exports-vite: condition: service_completed_successfully # test-exports-bun: # condition: service_completed_successfully
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/tsconfig.json
{ "compilerOptions": { "target": "es5", "lib": ["dom", "dom.iterable", "esnext"], "allowJs": true, "skipLibCheck": true, "strict": true, "forceConsistentCasingInFileNames": true, "noEmit": true, "esModuleInterop": true, "module": "esnext", "moduleResolution": "node", "resolveJsonModule": true, "isolatedModules": true, "jsx": "preserve", "incremental": true, "paths": { "@/*": ["./src/*"], "react": ["./node_modules/@types/react"] } }, "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"], "exclude": [ "node_modules", "libs" ] }
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/.eslintrc.json
{ "extends": "next/core-web-vitals" }
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/README.md
This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app). ## Getting Started First, run the development server: ```bash npm run dev # or yarn dev # or pnpm dev ``` Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. You can start editing the page by modifying `pages/index.tsx`. The page auto-updates as you edit the file. [API routes](https://nextjs.org/docs/api-routes/introduction) can be accessed on [http://localhost:3000/api/hello](http://localhost:3000/api/hello). This endpoint can be edited in `pages/api/hello.ts`. The `pages/api` directory is mapped to `/api/*`. Files in this directory are treated as [API routes](https://nextjs.org/docs/api-routes/introduction) instead of React pages. This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font. ## Learn More To learn more about Next.js, take a look at the following resources: - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome! ## Deploy on Vercel The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details.
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/package.json
{ "name": "test-exports-vercel", "version": "0.1.0", "workspaces": [ "libs/*" ], "private": true, "scripts": { "dev": "next dev", "build": "next build", "start": "next start", "test": "next lint" }, "dependencies": { "@langchain/anthropic": "workspace:*", "@langchain/community": "workspace:*", "@langchain/core": "workspace:*", "@langchain/openai": "workspace:*", "@types/node": "18.15.11", "@types/react": "18.0.33", "@types/react-dom": "18.0.11", "eslint": "8.37.0", "eslint-config-next": "13.3.0", "langchain": "workspace:*", "next": "13.4.9", "react": "18.2.0", "react-dom": "18.2.0", "typescript": "^5.0.0" } }
0
lc_public_repos/langchainjs/environment_tests/test-exports-vercel
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src/entrypoints.js
export * from "langchain/load"; export * from "langchain/load/serializable"; export * from "langchain/agents"; export * from "langchain/agents/toolkits"; export * from "langchain/agents/format_scratchpad"; export * from "langchain/agents/format_scratchpad/openai_tools"; export * from "langchain/agents/format_scratchpad/log"; export * from "langchain/agents/format_scratchpad/xml"; export * from "langchain/agents/format_scratchpad/log_to_message"; export * from "langchain/agents/react/output_parser"; export * from "langchain/agents/xml/output_parser"; export * from "langchain/agents/openai/output_parser"; export * from "langchain/tools"; export * from "langchain/tools/chain"; export * from "langchain/tools/render"; export * from "langchain/tools/retriever"; export * from "langchain/chains"; export * from "langchain/chains/combine_documents"; export * from "langchain/chains/combine_documents/reduce"; export * from "langchain/chains/history_aware_retriever"; export * from "langchain/chains/openai_functions"; export * from "langchain/chains/retrieval"; export * from "langchain/embeddings/cache_backed"; export * from "langchain/embeddings/fake"; export * from "langchain/vectorstores/memory"; export * from "langchain/text_splitter"; export * from "langchain/memory"; export * from "langchain/memory/chat_memory"; export * from "langchain/document"; export * from "langchain/document_loaders/base"; export * from "langchain/document_transformers/openai_functions"; export * from "langchain/callbacks"; export * from "langchain/output_parsers"; export * from "langchain/retrievers/contextual_compression"; export * from "langchain/retrievers/document_compressors"; export * from "langchain/retrievers/ensemble"; export * from "langchain/retrievers/multi_query"; export * from "langchain/retrievers/multi_vector"; export * from "langchain/retrievers/parent_document"; export * from "langchain/retrievers/time_weighted"; export * from 
"langchain/retrievers/document_compressors/chain_extract"; export * from "langchain/retrievers/document_compressors/embeddings_filter"; export * from "langchain/retrievers/hyde"; export * from "langchain/retrievers/score_threshold"; export * from "langchain/retrievers/matryoshka_retriever"; export * from "langchain/stores/doc/base"; export * from "langchain/stores/doc/in_memory"; export * from "langchain/stores/file/in_memory"; export * from "langchain/stores/message/in_memory"; export * from "langchain/storage/encoder_backed"; export * from "langchain/storage/in_memory"; export * from "langchain/util/document"; export * from "langchain/util/math"; export * from "langchain/util/time"; export * from "langchain/experimental/autogpt"; export * from "langchain/experimental/openai_assistant"; export * from "langchain/experimental/openai_files"; export * from "langchain/experimental/babyagi"; export * from "langchain/experimental/generative_agents"; export * from "langchain/experimental/plan_and_execute"; export * from "langchain/experimental/chains/violation_of_expectations"; export * from "langchain/experimental/masking"; export * from "langchain/experimental/prompts/custom_format"; export * from "langchain/evaluation"; export * from "langchain/smith"; export * from "langchain/runnables/remote"; export * from "langchain/indexes"; export * from "langchain/schema/query_constructor"; export * from "langchain/schema/prompt_template";
0
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src/styles/globals.css
:root { --max-width: 1100px; --border-radius: 12px; --font-mono: ui-monospace, Menlo, Monaco, 'Cascadia Mono', 'Segoe UI Mono', 'Roboto Mono', 'Oxygen Mono', 'Ubuntu Monospace', 'Source Code Pro', 'Fira Mono', 'Droid Sans Mono', 'Courier New', monospace; --foreground-rgb: 0, 0, 0; --background-start-rgb: 214, 219, 220; --background-end-rgb: 255, 255, 255; --primary-glow: conic-gradient( from 180deg at 50% 50%, #16abff33 0deg, #0885ff33 55deg, #54d6ff33 120deg, #0071ff33 160deg, transparent 360deg ); --secondary-glow: radial-gradient( rgba(255, 255, 255, 1), rgba(255, 255, 255, 0) ); --tile-start-rgb: 239, 245, 249; --tile-end-rgb: 228, 232, 233; --tile-border: conic-gradient( #00000080, #00000040, #00000030, #00000020, #00000010, #00000010, #00000080 ); --callout-rgb: 238, 240, 241; --callout-border-rgb: 172, 175, 176; --card-rgb: 180, 185, 188; --card-border-rgb: 131, 134, 135; } @media (prefers-color-scheme: dark) { :root { --foreground-rgb: 255, 255, 255; --background-start-rgb: 0, 0, 0; --background-end-rgb: 0, 0, 0; --primary-glow: radial-gradient(rgba(1, 65, 255, 0.4), rgba(1, 65, 255, 0)); --secondary-glow: linear-gradient( to bottom right, rgba(1, 65, 255, 0), rgba(1, 65, 255, 0), rgba(1, 65, 255, 0.3) ); --tile-start-rgb: 2, 13, 46; --tile-end-rgb: 2, 5, 19; --tile-border: conic-gradient( #ffffff80, #ffffff40, #ffffff30, #ffffff20, #ffffff10, #ffffff10, #ffffff80 ); --callout-rgb: 20, 20, 20; --callout-border-rgb: 108, 108, 108; --card-rgb: 100, 100, 100; --card-border-rgb: 200, 200, 200; } } * { box-sizing: border-box; padding: 0; margin: 0; } html, body { max-width: 100vw; overflow-x: hidden; } body { color: rgb(var(--foreground-rgb)); background: linear-gradient( to bottom, transparent, rgb(var(--background-end-rgb)) ) rgb(var(--background-start-rgb)); } a { color: inherit; text-decoration: none; } @media (prefers-color-scheme: dark) { html { color-scheme: dark; } }
0
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src/styles/Home.module.css
.main { display: flex; flex-direction: column; justify-content: space-between; align-items: center; padding: 6rem; min-height: 100vh; } .description { display: inherit; justify-content: inherit; align-items: inherit; font-size: 0.85rem; max-width: var(--max-width); width: 100%; z-index: 2; font-family: var(--font-mono); } .description a { display: flex; justify-content: center; align-items: center; gap: 0.5rem; } .description p { position: relative; margin: 0; padding: 1rem; background-color: rgba(var(--callout-rgb), 0.5); border: 1px solid rgba(var(--callout-border-rgb), 0.3); border-radius: var(--border-radius); } .code { font-weight: 700; font-family: var(--font-mono); } .grid { display: grid; grid-template-columns: repeat(4, minmax(25%, auto)); width: var(--max-width); max-width: 100%; } .card { padding: 1rem 1.2rem; border-radius: var(--border-radius); background: rgba(var(--card-rgb), 0); border: 1px solid rgba(var(--card-border-rgb), 0); transition: background 200ms, border 200ms; } .card span { display: inline-block; transition: transform 200ms; } .card h2 { font-weight: 600; margin-bottom: 0.7rem; } .card p { margin: 0; opacity: 0.6; font-size: 0.9rem; line-height: 1.5; max-width: 30ch; } .center { display: flex; justify-content: center; align-items: center; position: relative; padding: 4rem 0; } .center::before { background: var(--secondary-glow); border-radius: 50%; width: 480px; height: 360px; margin-left: -400px; } .center::after { background: var(--primary-glow); width: 240px; height: 180px; z-index: -1; } .center::before, .center::after { content: ''; left: 50%; position: absolute; filter: blur(45px); transform: translateZ(0); } .logo, .thirteen { position: relative; } .thirteen { display: flex; justify-content: center; align-items: center; width: 75px; height: 75px; padding: 25px 10px; margin-left: 16px; transform: translateZ(0); border-radius: var(--border-radius); overflow: hidden; box-shadow: 0px 2px 8px -1px #0000001a; } .thirteen::before, 
.thirteen::after { content: ''; position: absolute; z-index: -1; } /* Conic Gradient Animation */ .thirteen::before { animation: 6s rotate linear infinite; width: 200%; height: 200%; background: var(--tile-border); } /* Inner Square */ .thirteen::after { inset: 0; padding: 1px; border-radius: var(--border-radius); background: linear-gradient( to bottom right, rgba(var(--tile-start-rgb), 1), rgba(var(--tile-end-rgb), 1) ); background-clip: content-box; } /* Enable hover only on non-touch devices */ @media (hover: hover) and (pointer: fine) { .card:hover { background: rgba(var(--card-rgb), 0.1); border: 1px solid rgba(var(--card-border-rgb), 0.15); } .card:hover span { transform: translateX(4px); } } @media (prefers-reduced-motion) { .thirteen::before { animation: none; } .card:hover span { transform: none; } } /* Mobile */ @media (max-width: 700px) { .content { padding: 4rem; } .grid { grid-template-columns: 1fr; margin-bottom: 120px; max-width: 320px; text-align: center; } .card { padding: 1rem 2.5rem; } .card h2 { margin-bottom: 0.5rem; } .center { padding: 8rem 0 6rem; } .center::before { transform: none; height: 300px; } .description { font-size: 0.8rem; } .description a { padding: 1rem; } .description p, .description div { display: flex; justify-content: center; position: fixed; width: 100%; } .description p { align-items: center; inset: 0 0 auto; padding: 2rem 1rem 1.4rem; border-radius: 0; border: none; border-bottom: 1px solid rgba(var(--callout-border-rgb), 0.25); background: linear-gradient( to bottom, rgba(var(--background-start-rgb), 1), rgba(var(--callout-rgb), 0.5) ); background-clip: padding-box; backdrop-filter: blur(24px); } .description div { align-items: flex-end; pointer-events: none; inset: auto 0 0; padding: 2rem; height: 200px; background: linear-gradient( to bottom, transparent 0%, rgb(var(--background-end-rgb)) 40% ); z-index: 1; } } /* Tablet and Smaller Desktop */ @media (min-width: 701px) and (max-width: 1120px) { .grid { 
grid-template-columns: repeat(2, 50%); } } @media (prefers-color-scheme: dark) { .vercelLogo { filter: invert(1); } .logo, .thirteen img { filter: invert(1) drop-shadow(0 0 0.3rem #ffffff70); } } @keyframes rotate { from { transform: rotate(360deg); } to { transform: rotate(0deg); } }
0
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src/pages/index.tsx
// import all entrypoints to test, do not do this in your own app import "../entrypoints.js"; import Head from "next/head"; import styles from "@/styles/Home.module.css"; import { useCallback } from "react"; import { ChatOpenAI } from "@langchain/openai"; import { CallbackManager } from "@langchain/core/callbacks/manager"; import { LLMChain } from "langchain/chains"; import { ChatPromptTemplate, HumanMessagePromptTemplate, } from "@langchain/core/prompts"; // Don't do this in your app, it would leak your API key const OPENAI_API_KEY = process.env.NEXT_PUBLIC_OPENAI_API_KEY; export default function Home() { const runChain = useCallback(async () => { const llm = new ChatOpenAI({ openAIApiKey: OPENAI_API_KEY, streaming: true, callbackManager: CallbackManager.fromHandlers({ handleLLMNewToken: async (token) => console.log("handleLLMNewToken", token), }), }); // Test count tokens const n = await llm.getNumTokens("Hello"); console.log("getNumTokens", n); // Test a chain + prompt + model const chain = new LLMChain({ llm, prompt: ChatPromptTemplate.fromMessages([ HumanMessagePromptTemplate.fromTemplate("{input}"), ]), }); const res = await chain.run("hello"); console.log("runChain", res); }, []); return ( <> <Head> <title>Create Next App</title> <meta name="description" content="Generated by create next app" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <link rel="icon" href="/favicon.ico" /> </Head> <main className={styles.main}> <button onClick={runChain}>Click to run a chain</button> </main> </> ); }
0
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src/pages/_document.tsx
import { Html, Head, Main, NextScript } from 'next/document' export default function Document() { return ( <Html lang="en"> <Head /> <body> <Main /> <NextScript /> </body> </Html> ) }
0
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src/pages/_app.tsx
import '@/styles/globals.css' import type { AppProps } from 'next/app' export default function App({ Component, pageProps }: AppProps) { return <Component {...pageProps} /> }
0
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src/pages
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src/pages/api/hello-serverless.ts
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction // import all entrypoints to test, do not do this in your own app import "../../entrypoints.js"; // Import a few things we'll use to test the exports import { LLMChain } from "langchain/chains"; import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate, HumanMessagePromptTemplate, } from "@langchain/core/prompts"; import { OpenAI } from "@langchain/openai"; import { OpenAIEmbeddings } from "@langchain/openai"; import { TextLoader } from "langchain/document_loaders/fs/text"; import { NextApiRequest, NextApiResponse } from "next"; export default async function handler( req: NextApiRequest, res: NextApiResponse ) { // Intantiate a few things to test the exports new OpenAI({ openAIApiKey: process.env.OPENAI_API_KEY }); const emb = new OpenAIEmbeddings({ openAIApiKey: process.env.OPENAI_API_KEY, }); // Test a document loader from a blob const docs = new TextLoader(new Blob(["hello"])); // Test a chain + prompt + model const chain = new LLMChain({ llm: new ChatOpenAI({ openAIApiKey: process.env.OPENAI_API_KEY }), prompt: ChatPromptTemplate.fromMessages([ HumanMessagePromptTemplate.fromTemplate("{input}"), ]), }); const output = await chain.run("hello"); return res.status(200).json({ name: `Hello, from ${req.url} I'm a Serverless Function! Assistant says: ${output}`, }); }
0
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src/pages
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/src/pages/api/hello-edge.ts
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction // import all entrypoints to test, do not do this in your own app import "../../entrypoints.js"; // Import a few things we'll use to test the exports import { LLMChain } from "langchain/chains"; import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate, HumanMessagePromptTemplate, } from "@langchain/core/prompts"; import { OpenAI } from "@langchain/openai"; import { OpenAIEmbeddings } from "@langchain/openai"; import { CallbackManager } from "@langchain/core/callbacks/manager"; import { ChatAgent } from "langchain/agents"; import { NextRequest, NextResponse } from "next/server"; export const config = { runtime: "edge", }; export default async function handler(req: NextRequest) { // Intantiate a few things to test the exports new OpenAI({ openAIApiKey: process.env.OPENAI_API_KEY }); const emb = new OpenAIEmbeddings({ openAIApiKey: process.env.OPENAI_API_KEY, }); const agent = ChatAgent.fromLLMAndTools(new ChatOpenAI(), []); // Set up a streaming LLM const encoder = new TextEncoder(); const stream = new TransformStream(); const writer = stream.writable.getWriter(); const llm = new ChatOpenAI({ streaming: true, callbackManager: CallbackManager.fromHandlers({ handleLLMNewToken: async (token) => { await writer.ready; await writer.write(encoder.encode(`data: ${token}\n\n`)); }, handleLLMEnd: async () => { await writer.ready; await writer.close(); }, handleLLMError: async (e) => { await writer.ready; await writer.abort(e); }, }), }); // Test a chain + prompt + model const chain = new LLMChain({ llm, prompt: ChatPromptTemplate.fromMessages([ HumanMessagePromptTemplate.fromTemplate("{input}"), ]), }); // Run the chain but don't await it, otherwise the response will start // only after the chain is done chain.run("hello").catch(console.error); return new NextResponse(stream.readable, { headers: { "Content-Type": "text/event-stream", "Cache-Control": "no-cache", }, }); }
0
lc_public_repos/langchainjs/environment_tests/test-exports-vercel
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/public/next.svg
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 394 80"><path fill="#000" d="M262 0h68.5v12.7h-27.2v66.6h-13.6V12.7H262V0ZM149 0v12.7H94v20.4h44.3v12.6H94v21h55v12.6H80.5V0h68.7zm34.3 0h-17.8l63.8 79.4h17.9l-32-39.7 32-39.6h-17.9l-23 28.6-23-28.6zm18.3 56.7-9-11-27.1 33.7h17.8l18.3-22.7z"/><path fill="#000" d="M81 79.3 17 0H0v79.3h13.6V17l50.2 62.3H81Zm252.6-.4c-1 0-1.8-.4-2.5-1s-1.1-1.6-1.1-2.6.3-1.8 1-2.5 1.6-1 2.6-1 1.8.3 2.5 1a3.4 3.4 0 0 1 .6 4.3 3.7 3.7 0 0 1-3 1.8zm23.2-33.5h6v23.3c0 2.1-.4 4-1.3 5.5a9.1 9.1 0 0 1-3.8 3.5c-1.6.8-3.5 1.3-5.7 1.3-2 0-3.7-.4-5.3-1s-2.8-1.8-3.7-3.2c-.9-1.3-1.4-3-1.4-5h6c.1.8.3 1.6.7 2.2s1 1.2 1.6 1.5c.7.4 1.5.5 2.4.5 1 0 1.8-.2 2.4-.6a4 4 0 0 0 1.6-1.8c.3-.8.5-1.8.5-3V45.5zm30.9 9.1a4.4 4.4 0 0 0-2-3.3 7.5 7.5 0 0 0-4.3-1.1c-1.3 0-2.4.2-3.3.5-.9.4-1.6 1-2 1.6a3.5 3.5 0 0 0-.3 4c.3.5.7.9 1.3 1.2l1.8 1 2 .5 3.2.8c1.3.3 2.5.7 3.7 1.2a13 13 0 0 1 3.2 1.8 8.1 8.1 0 0 1 3 6.5c0 2-.5 3.7-1.5 5.1a10 10 0 0 1-4.4 3.5c-1.8.8-4.1 1.2-6.8 1.2-2.6 0-4.9-.4-6.8-1.2-2-.8-3.4-2-4.5-3.5a10 10 0 0 1-1.7-5.6h6a5 5 0 0 0 3.5 4.6c1 .4 2.2.6 3.4.6 1.3 0 2.5-.2 3.5-.6 1-.4 1.8-1 2.4-1.7a4 4 0 0 0 .8-2.4c0-.9-.2-1.6-.7-2.2a11 11 0 0 0-2.1-1.4l-3.2-1-3.8-1c-2.8-.7-5-1.7-6.6-3.2a7.2 7.2 0 0 1-2.4-5.7 8 8 0 0 1 1.7-5 10 10 0 0 1 4.3-3.5c2-.8 4-1.2 6.4-1.2 2.3 0 4.4.4 6.2 1.2 1.8.8 3.2 2 4.3 3.4 1 1.4 1.5 3 1.5 5h-5.8z"/></svg>
0
lc_public_repos/langchainjs/environment_tests/test-exports-vercel
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/public/thirteen.svg
<svg xmlns="http://www.w3.org/2000/svg" width="40" height="31" fill="none"><g opacity=".9"><path fill="url(#a)" d="M13 .4v29.3H7V6.3h-.2L0 10.5V5L7.2.4H13Z"/><path fill="url(#b)" d="M28.8 30.1c-2.2 0-4-.3-5.7-1-1.7-.8-3-1.8-4-3.1a7.7 7.7 0 0 1-1.4-4.6h6.2c0 .8.3 1.4.7 2 .4.5 1 .9 1.7 1.2.7.3 1.6.4 2.5.4 1 0 1.7-.2 2.5-.5.7-.3 1.3-.8 1.7-1.4.4-.6.6-1.2.6-2s-.2-1.5-.7-2.1c-.4-.6-1-1-1.8-1.4-.8-.4-1.8-.5-2.9-.5h-2.7v-4.6h2.7a6 6 0 0 0 2.5-.5 4 4 0 0 0 1.7-1.3c.4-.6.6-1.3.6-2a3.5 3.5 0 0 0-2-3.3 5.6 5.6 0 0 0-4.5 0 4 4 0 0 0-1.7 1.2c-.4.6-.6 1.2-.6 2h-6c0-1.7.6-3.2 1.5-4.5 1-1.3 2.2-2.3 3.8-3C25 .4 26.8 0 28.8 0s3.8.4 5.3 1.1c1.5.7 2.7 1.7 3.6 3a7.2 7.2 0 0 1 1.2 4.2c0 1.6-.5 3-1.5 4a7 7 0 0 1-4 2.2v.2c2.2.3 3.8 1 5 2.2a6.4 6.4 0 0 1 1.6 4.6c0 1.7-.5 3.1-1.4 4.4a9.7 9.7 0 0 1-4 3.1c-1.7.8-3.7 1.1-5.8 1.1Z"/></g><defs><linearGradient id="a" x1="20" x2="20" y1="0" y2="30.1" gradientUnits="userSpaceOnUse"><stop/><stop offset="1" stop-color="#3D3D3D"/></linearGradient><linearGradient id="b" x1="20" x2="20" y1="0" y2="30.1" gradientUnits="userSpaceOnUse"><stop/><stop offset="1" stop-color="#3D3D3D"/></linearGradient></defs></svg>
0
lc_public_repos/langchainjs/environment_tests/test-exports-vercel
lc_public_repos/langchainjs/environment_tests/test-exports-vercel/public/vercel.svg
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 283 64"><path fill="black" d="M141 16c-11 0-19 7-19 18s9 18 20 18c7 0 13-3 16-7l-7-5c-2 3-6 4-9 4-5 0-9-3-10-7h28v-3c0-11-8-18-19-18zm-9 15c1-4 4-7 9-7s8 3 9 7h-18zm117-15c-11 0-19 7-19 18s9 18 20 18c6 0 12-3 16-7l-8-5c-2 3-5 4-8 4-5 0-9-3-11-7h28l1-3c0-11-8-18-19-18zm-10 15c2-4 5-7 10-7s8 3 9 7h-19zm-39 3c0 6 4 10 10 10 4 0 7-2 9-5l8 5c-3 5-9 8-17 8-11 0-19-7-19-18s8-18 19-18c8 0 14 3 17 8l-8 5c-2-3-5-5-9-5-6 0-10 4-10 10zm83-29v46h-9V5h9zM37 0l37 64H0L37 0zm92 5-27 48L74 5h10l18 30 17-30h10zm59 12v10l-3-1c-6 0-10 4-10 10v15h-9V17h9v9c0-5 6-9 13-9z"/></svg>
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-esbuild/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "./dist", "rootDir": "./src", "lib": [ "ES2021", "ES2022.Object", "DOM" ], "target": "ES2021", "module": "nodenext", }, "include": [ "src/**/*" ] }
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-esbuild/package.json
{ "name": "test-exports-esbuild", "version": "0.0.0", "workspaces": [ "libs/*" ], "private": true, "description": "Tests for the things exported by the langchain package", "main": "./index.mjs", "type": "module", "scripts": { "build": "tsc", "test": "yarn run test:esm && yarn run test:cjs", "test:esm": "rm -rf dist-esm && esbuild --bundle src/* --outdir=dist-esm --platform=node --format=esm --external:'../node_modules/*' --external:'./node_modules/*' --supported:top-level-await=true && bash ./entrypoint.sh dist-esm", "test:cjs": "rm -rf dist-cjs && esbuild --bundle src/*.cjs --outdir=dist-cjs --platform=node --format=cjs --external:'../node_modules/*' --external:'./node_modules/*' --out-extension:.js=.cjs && bash ./entrypoint.sh dist-cjs", "format": "prettier --write src", "format:check": "prettier --check src" }, "author": "LangChain", "license": "MIT", "dependencies": { "@langchain/anthropic": "workspace:*", "@langchain/community": "workspace:*", "@langchain/core": "workspace:*", "@langchain/openai": "workspace:*", "@tsconfig/recommended": "^1.0.2", "esbuild": "^0.17.18", "langchain": "workspace:*", "typescript": "^5.0.0" }, "devDependencies": { "@types/node": "^18.15.11", "prettier": "^2.8.3" } }
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-esbuild/entrypoint.sh
#!/usr/bin/env bash set -euxo pipefail DIR=$1 for file in $DIR/*; do if [[ $file == 'dist-esm/require.js' ]]; then continue; fi node $file; done
0
lc_public_repos/langchainjs/environment_tests/test-exports-esbuild
lc_public_repos/langchainjs/environment_tests/test-exports-esbuild/src/index.js
import assert from "assert"; import { OpenAI } from "@langchain/openai"; import { LLMChain } from "langchain/chains"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { OpenAIEmbeddings } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; import { CallbackManager } from "@langchain/core/callbacks/manager"; // Test exports assert(typeof OpenAI === "function"); assert(typeof LLMChain === "function"); assert(typeof ChatPromptTemplate === "function"); assert(typeof MemoryVectorStore === "function"); assert(typeof OpenAIEmbeddings === "function"); assert(typeof CallbackManager === "function"); const vs = new MemoryVectorStore(new OpenAIEmbeddings({ openAIApiKey: "sk-XXXX" })); await vs.addVectors( [ [0, 1, 0], [0, 0, 1], ], [ new Document({ pageContent: "a", }), new Document({ pageContent: "b", }), ] ); assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1);
0
lc_public_repos/langchainjs/environment_tests/test-exports-esbuild
lc_public_repos/langchainjs/environment_tests/test-exports-esbuild/src/typescript.ts
import assert from "assert"; import { OpenAI } from "@langchain/openai"; import { LLMChain } from "langchain/chains"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { OpenAIEmbeddings } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; async function test(useAzure: boolean = false) { // Test exports assert(typeof OpenAI === "function"); assert(typeof LLMChain === "function"); assert(typeof ChatPromptTemplate === "function"); assert(typeof MemoryVectorStore === "function"); const openAIParameters = useAzure ? { azureOpenAIApiKey: "sk-XXXX", azureOpenAIApiInstanceName: "XXXX", azureOpenAIApiDeploymentName: "XXXX", azureOpenAIApiVersion: "XXXX", } : { openAIApiKey: "sk-XXXX", }; const vs = new MemoryVectorStore(new OpenAIEmbeddings(openAIParameters)); await vs.addVectors( [ [0, 1, 0], [0, 0, 1], ], [ new Document({ pageContent: "a", }), new Document({ pageContent: "b", }), ] ); assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1); } test(false) .then(() => console.log("openAI Api success")) .catch((e) => { console.error(e); process.exit(1); }); test(true) .then(() => console.log("Azure openAI Api success")) .catch((e) => { console.error(e); process.exit(1); });
0
lc_public_repos/langchainjs/environment_tests/test-exports-esbuild
lc_public_repos/langchainjs/environment_tests/test-exports-esbuild/src/import.cjs
async function test() { const { default: assert } = await import("assert"); const { OpenAI } = await import("@langchain/openai"); const { LLMChain } = await import("langchain/chains"); const { ChatPromptTemplate } = await import("@langchain/core/prompts"); const { MemoryVectorStore } = await import("langchain/vectorstores/memory"); const { OpenAIEmbeddings } = await import("@langchain/openai"); const { Document } = await import("@langchain/core/documents"); // Test exports assert(typeof OpenAI === "function"); assert(typeof LLMChain === "function"); assert(typeof ChatPromptTemplate === "function"); assert(typeof MemoryVectorStore === "function"); const vs = new MemoryVectorStore(new OpenAIEmbeddings({ openAIApiKey: "sk-XXXX" })); await vs.addVectors( [ [0, 1, 0], [0, 0, 1], ], [ new Document({ pageContent: "a", }), new Document({ pageContent: "b", }), ] ); assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1); } test() .then(() => console.log("success")) .catch((e) => { console.error(e); process.exit(1); });
0
lc_public_repos/langchainjs/environment_tests/test-exports-esbuild
lc_public_repos/langchainjs/environment_tests/test-exports-esbuild/src/entrypoints.js
import * as load from "langchain/load"; import * as load_serializable from "langchain/load/serializable"; import * as agents from "langchain/agents"; import * as agents_toolkits from "langchain/agents/toolkits"; import * as agents_format_scratchpad from "langchain/agents/format_scratchpad"; import * as agents_format_scratchpad_openai_tools from "langchain/agents/format_scratchpad/openai_tools"; import * as agents_format_scratchpad_log from "langchain/agents/format_scratchpad/log"; import * as agents_format_scratchpad_xml from "langchain/agents/format_scratchpad/xml"; import * as agents_format_scratchpad_log_to_message from "langchain/agents/format_scratchpad/log_to_message"; import * as agents_react_output_parser from "langchain/agents/react/output_parser"; import * as agents_xml_output_parser from "langchain/agents/xml/output_parser"; import * as agents_openai_output_parser from "langchain/agents/openai/output_parser"; import * as tools from "langchain/tools"; import * as tools_chain from "langchain/tools/chain"; import * as tools_render from "langchain/tools/render"; import * as tools_retriever from "langchain/tools/retriever"; import * as chains from "langchain/chains"; import * as chains_combine_documents from "langchain/chains/combine_documents"; import * as chains_combine_documents_reduce from "langchain/chains/combine_documents/reduce"; import * as chains_history_aware_retriever from "langchain/chains/history_aware_retriever"; import * as chains_openai_functions from "langchain/chains/openai_functions"; import * as chains_retrieval from "langchain/chains/retrieval"; import * as embeddings_cache_backed from "langchain/embeddings/cache_backed"; import * as embeddings_fake from "langchain/embeddings/fake"; import * as vectorstores_memory from "langchain/vectorstores/memory"; import * as text_splitter from "langchain/text_splitter"; import * as memory from "langchain/memory"; import * as memory_chat_memory from "langchain/memory/chat_memory"; import * as 
document from "langchain/document"; import * as document_loaders_base from "langchain/document_loaders/base"; import * as document_transformers_openai_functions from "langchain/document_transformers/openai_functions"; import * as callbacks from "langchain/callbacks"; import * as output_parsers from "langchain/output_parsers"; import * as retrievers_contextual_compression from "langchain/retrievers/contextual_compression"; import * as retrievers_document_compressors from "langchain/retrievers/document_compressors"; import * as retrievers_ensemble from "langchain/retrievers/ensemble"; import * as retrievers_multi_query from "langchain/retrievers/multi_query"; import * as retrievers_multi_vector from "langchain/retrievers/multi_vector"; import * as retrievers_parent_document from "langchain/retrievers/parent_document"; import * as retrievers_time_weighted from "langchain/retrievers/time_weighted"; import * as retrievers_document_compressors_chain_extract from "langchain/retrievers/document_compressors/chain_extract"; import * as retrievers_document_compressors_embeddings_filter from "langchain/retrievers/document_compressors/embeddings_filter"; import * as retrievers_hyde from "langchain/retrievers/hyde"; import * as retrievers_score_threshold from "langchain/retrievers/score_threshold"; import * as retrievers_matryoshka_retriever from "langchain/retrievers/matryoshka_retriever"; import * as stores_doc_base from "langchain/stores/doc/base"; import * as stores_doc_in_memory from "langchain/stores/doc/in_memory"; import * as stores_file_in_memory from "langchain/stores/file/in_memory"; import * as stores_message_in_memory from "langchain/stores/message/in_memory"; import * as storage_encoder_backed from "langchain/storage/encoder_backed"; import * as storage_in_memory from "langchain/storage/in_memory"; import * as util_document from "langchain/util/document"; import * as util_math from "langchain/util/math"; import * as util_time from "langchain/util/time"; import * as 
experimental_autogpt from "langchain/experimental/autogpt"; import * as experimental_openai_assistant from "langchain/experimental/openai_assistant"; import * as experimental_openai_files from "langchain/experimental/openai_files"; import * as experimental_babyagi from "langchain/experimental/babyagi"; import * as experimental_generative_agents from "langchain/experimental/generative_agents"; import * as experimental_plan_and_execute from "langchain/experimental/plan_and_execute"; import * as experimental_chains_violation_of_expectations from "langchain/experimental/chains/violation_of_expectations"; import * as experimental_masking from "langchain/experimental/masking"; import * as experimental_prompts_custom_format from "langchain/experimental/prompts/custom_format"; import * as evaluation from "langchain/evaluation"; import * as smith from "langchain/smith"; import * as runnables_remote from "langchain/runnables/remote"; import * as indexes from "langchain/indexes"; import * as schema_query_constructor from "langchain/schema/query_constructor"; import * as schema_prompt_template from "langchain/schema/prompt_template";
0
lc_public_repos/langchainjs/environment_tests/test-exports-esbuild
lc_public_repos/langchainjs/environment_tests/test-exports-esbuild/src/require.cjs
const assert = require("assert"); const { OpenAI } = require("@langchain/openai"); const { LLMChain } = require("langchain/chains"); const { ChatPromptTemplate } = require("@langchain/core/prompts"); const { MemoryVectorStore } = require("langchain/vectorstores/memory"); const { OpenAIEmbeddings } = require("@langchain/openai"); const { Document } = require("@langchain/core/documents"); async function test() { // Test exports assert(typeof OpenAI === "function"); assert(typeof LLMChain === "function"); assert(typeof ChatPromptTemplate === "function"); assert(typeof MemoryVectorStore === "function"); const vs = new MemoryVectorStore(new OpenAIEmbeddings({ openAIApiKey: "sk-XXXX" })); await vs.addVectors( [ [0, 1, 0], [0, 0, 1], ], [ new Document({ pageContent: "a", }), new Document({ pageContent: "b", }), ] ); assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1); } test() .then(() => console.log("success")) .catch((e) => { console.error(e); process.exit(1); });
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-bun/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "./dist", "rootDir": "./src", "lib": [ "ES2021", "ES2022.Object", "DOM" ], "target": "ES2021", }, "include": [ "src/**/*" ] }
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-bun/package.json
{ "name": "test-exports-bun", "version": "0.0.0", "private": true, "description": "Tests for the things exported by the langchain package", "main": "./index.mjs", "type": "module", "scripts": { "build": "tsc", "test": "bun run test:esm && bun run test:cjs && bun run test:cjs:import && bun run test:entrypoints && bun run test:ts", "test:esm": "bun src/index.js", "test:cjs": "bun src/require.cjs", "test:cjs:import": "bun src/import.cjs", "test:entrypoints": "bun src/entrypoints.js", "test:ts": "bun dist/index.js" }, "author": "LangChain", "license": "MIT", "dependencies": { "@langchain/anthropic": "workspace:*", "@langchain/community": "workspace:*", "@langchain/core": "workspace:*", "@langchain/openai": "workspace:*", "d3-dsv": "2", "hnswlib-node": "^3.0.0", "langchain": "workspace:*" }, "devDependencies": { "@tsconfig/recommended": "^1.0.2", "@types/node": "^18.15.11", "typescript": "^5.0.0" } }
0
lc_public_repos/langchainjs/environment_tests/test-exports-bun
lc_public_repos/langchainjs/environment_tests/test-exports-bun/src/index.js
import assert from "assert"; import { OpenAI } from "@langchain/openai"; import { LLMChain } from "langchain/chains"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { OpenAIEmbeddings } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; import { CallbackManager } from "@langchain/core/callbacks/manager"; // Test exports assert(typeof OpenAI === "function"); assert(typeof LLMChain === "function"); assert(typeof ChatPromptTemplate === "function"); assert(typeof MemoryVectorStore === "function"); assert(typeof OpenAIEmbeddings === "function"); assert(typeof CallbackManager === "function"); const vs = new MemoryVectorStore(new OpenAIEmbeddings({ openAIApiKey: "sk-XXXX" })); await vs.addVectors( [ [0, 1, 0], [0, 0, 1], ], [ new Document({ pageContent: "a", }), new Document({ pageContent: "b", }), ] ); assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1);
0
lc_public_repos/langchainjs/environment_tests/test-exports-bun
lc_public_repos/langchainjs/environment_tests/test-exports-bun/src/index.ts
import assert from "assert"; import { OpenAI } from "@langchain/openai"; import { LLMChain } from "langchain/chains"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { OpenAIEmbeddings } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; async function test(useAzure: boolean = false) { // Test exports assert(typeof OpenAI === "function"); assert(typeof LLMChain === "function"); assert(typeof ChatPromptTemplate === "function"); assert(typeof MemoryVectorStore === "function"); // Test dynamic imports of peer dependencies const openAIParameters = useAzure ? { azureOpenAIApiKey: "sk-XXXX", azureOpenAIApiInstanceName: "XXXX", azureOpenAIApiDeploymentName: "XXXX", azureOpenAIApiVersion: "XXXX", } : { openAIApiKey: "sk-XXXX", }; const vs = new MemoryVectorStore(new OpenAIEmbeddings(openAIParameters)); await vs.addVectors( [ [0, 1, 0], [0, 0, 1], ], [ new Document({ pageContent: "a", }), new Document({ pageContent: "b", }), ] ); assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1); } test(false) .then(() => console.log("openAI Api success")) .catch((e) => { console.error(e); process.exit(1); }); test(true) .then(() => console.log("Azure openAI Api success")) .catch((e) => { console.error(e); process.exit(1); });
0
lc_public_repos/langchainjs/environment_tests/test-exports-bun
lc_public_repos/langchainjs/environment_tests/test-exports-bun/src/import.cjs
async function test() { const { default: assert } = await import("assert"); const { OpenAI } = await import("@langchain/openai"); const { LLMChain } = await import("langchain/chains"); const { ChatPromptTemplate } = await import("@langchain/core/prompts"); const { MemoryVectorStore } = await import("langchain/vectorstores/memory"); const { OpenAIEmbeddings } = await import("@langchain/openai"); const { Document } = await import("@langchain/core/documents"); // Test exports assert(typeof OpenAI === "function"); assert(typeof LLMChain === "function"); assert(typeof ChatPromptTemplate === "function"); assert(typeof MemoryVectorStore === "function"); const vs = new MemoryVectorStore(new OpenAIEmbeddings({ openAIApiKey: "sk-XXXX" })); await vs.addVectors( [ [0, 1, 0], [0, 0, 1], ], [ new Document({ pageContent: "a", }), new Document({ pageContent: "b", }), ] ); assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1); } test() .then(() => console.log("success")) .catch((e) => { console.error(e); process.exit(1); });
0
lc_public_repos/langchainjs/environment_tests/test-exports-bun
lc_public_repos/langchainjs/environment_tests/test-exports-bun/src/entrypoints.js
export * from "langchain/load"; export * from "langchain/load/serializable"; export * from "langchain/agents"; export * from "langchain/agents/toolkits"; export * from "langchain/agents/format_scratchpad"; export * from "langchain/agents/format_scratchpad/openai_tools"; export * from "langchain/agents/format_scratchpad/log"; export * from "langchain/agents/format_scratchpad/xml"; export * from "langchain/agents/format_scratchpad/log_to_message"; export * from "langchain/agents/react/output_parser"; export * from "langchain/agents/xml/output_parser"; export * from "langchain/agents/openai/output_parser"; export * from "langchain/tools"; export * from "langchain/tools/chain"; export * from "langchain/tools/render"; export * from "langchain/tools/retriever"; export * from "langchain/chains"; export * from "langchain/chains/combine_documents"; export * from "langchain/chains/combine_documents/reduce"; export * from "langchain/chains/history_aware_retriever"; export * from "langchain/chains/openai_functions"; export * from "langchain/chains/retrieval"; export * from "langchain/embeddings/cache_backed"; export * from "langchain/embeddings/fake"; export * from "langchain/vectorstores/memory"; export * from "langchain/text_splitter"; export * from "langchain/memory"; export * from "langchain/memory/chat_memory"; export * from "langchain/document"; export * from "langchain/document_loaders/base"; export * from "langchain/document_transformers/openai_functions"; export * from "langchain/callbacks"; export * from "langchain/output_parsers"; export * from "langchain/retrievers/contextual_compression"; export * from "langchain/retrievers/document_compressors"; export * from "langchain/retrievers/ensemble"; export * from "langchain/retrievers/multi_query"; export * from "langchain/retrievers/multi_vector"; export * from "langchain/retrievers/parent_document"; export * from "langchain/retrievers/time_weighted"; export * from 
"langchain/retrievers/document_compressors/chain_extract"; export * from "langchain/retrievers/document_compressors/embeddings_filter"; export * from "langchain/retrievers/hyde"; export * from "langchain/retrievers/score_threshold"; export * from "langchain/retrievers/matryoshka_retriever"; export * from "langchain/stores/doc/base"; export * from "langchain/stores/doc/in_memory"; export * from "langchain/stores/file/in_memory"; export * from "langchain/stores/message/in_memory"; export * from "langchain/storage/encoder_backed"; export * from "langchain/storage/in_memory"; export * from "langchain/util/document"; export * from "langchain/util/math"; export * from "langchain/util/time"; export * from "langchain/experimental/autogpt"; export * from "langchain/experimental/openai_assistant"; export * from "langchain/experimental/openai_files"; export * from "langchain/experimental/babyagi"; export * from "langchain/experimental/generative_agents"; export * from "langchain/experimental/plan_and_execute"; export * from "langchain/experimental/chains/violation_of_expectations"; export * from "langchain/experimental/masking"; export * from "langchain/experimental/prompts/custom_format"; export * from "langchain/evaluation"; export * from "langchain/smith"; export * from "langchain/runnables/remote"; export * from "langchain/indexes"; export * from "langchain/schema/query_constructor"; export * from "langchain/schema/prompt_template";
0
lc_public_repos/langchainjs/environment_tests/test-exports-bun
lc_public_repos/langchainjs/environment_tests/test-exports-bun/src/require.cjs
const assert = require("assert"); const { OpenAI } = require("@langchain/openai"); const { LLMChain } = require("langchain/chains"); const { ChatPromptTemplate } = require("@langchain/core/prompts"); const { MemoryVectorStore } = require("langchain/vectorstores/memory"); const { OpenAIEmbeddings } = require("@langchain/openai"); const { Document } = require("@langchain/core/documents"); async function test() { // Test exports assert(typeof OpenAI === "function"); assert(typeof LLMChain === "function"); assert(typeof ChatPromptTemplate === "function"); assert(typeof MemoryVectorStore === "function"); const vs = new MemoryVectorStore(new OpenAIEmbeddings({ openAIApiKey: "sk-XXXX" })); await vs.addVectors( [ [0, 1, 0], [0, 0, 1], ], [ new Document({ pageContent: "a", }), new Document({ pageContent: "b", }), ] ); assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1); } test() .then(() => console.log("success")) .catch((e) => { console.error(e); process.exit(1); });
0
lc_public_repos/langchainjs/environment_tests/test-exports-bun
lc_public_repos/langchainjs/environment_tests/test-exports-bun/scripts/combine-dependencies.js
import * as fs from "fs"; const langchainPackageJson = JSON.parse(fs.readFileSync("../langchain/package.json")); const testPackageJson = JSON.parse(fs.readFileSync("./package.json")); testPackageJson.dependencies = { ...testPackageJson.dependencies, ...langchainPackageJson.dependencies }; fs.writeFileSync("./package.json", JSON.stringify(testPackageJson, null, 2));
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-esm/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "./dist", "rootDir": "./src", "lib": [ "ES2021", "ES2022.Object", "DOM" ], "target": "ES2021", "module": "nodenext", }, "include": [ "src/**/*" ] }
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-esm/package.json
{ "name": "test-exports-esm", "version": "0.0.0", "workspaces": [ "libs/*" ], "private": true, "description": "Tests for the things exported by the langchain package", "main": "./index.mjs", "type": "module", "scripts": { "build": "tsc", "test": "npm run test:esm && npm run test:cjs && npm run test:cjs:import && npm run test:entrypoints && npm run test:ts", "test:esm": "node src/index.js", "test:cjs": "node src/require.cjs", "test:cjs:import": "node src/import.cjs", "test:entrypoints": "node src/entrypoints.js", "test:ts": "node dist/index.js", "format": "prettier --write src", "format:check": "prettier --check src" }, "author": "LangChain", "license": "MIT", "dependencies": { "@langchain/anthropic": "workspace:*", "@langchain/community": "workspace:*", "@langchain/core": "workspace:*", "@langchain/openai": "workspace:*", "@tsconfig/recommended": "^1.0.2", "@xenova/transformers": "^2.17.2", "langchain": "workspace:*", "typescript": "^5.0.0" }, "devDependencies": { "@types/node": "^18.15.11", "prettier": "^2.8.3" } }
0
lc_public_repos/langchainjs/environment_tests/test-exports-esm
lc_public_repos/langchainjs/environment_tests/test-exports-esm/src/index.js
import assert from "assert"; import { OpenAI } from "@langchain/openai"; import { LLMChain } from "langchain/chains"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { HuggingFaceTransformersEmbeddings } from "@langchain/community/embeddings/hf_transformers"; import { Document } from "@langchain/core/documents"; import { CallbackManager } from "@langchain/core/callbacks/manager"; // Test exports assert(typeof OpenAI === "function"); assert(typeof LLMChain === "function"); assert(typeof ChatPromptTemplate === "function"); assert(typeof MemoryVectorStore === "function"); assert(typeof HuggingFaceTransformersEmbeddings === "function"); assert(typeof CallbackManager === "function"); const vs = new MemoryVectorStore(new HuggingFaceTransformersEmbeddings({ model: "Xenova/all-MiniLM-L6-v2", })); await vs.addVectors( [ [0, 1, 0], [0, 0, 1], ], [ new Document({ pageContent: "a", }), new Document({ pageContent: "b", }), ] ); assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1);
0
lc_public_repos/langchainjs/environment_tests/test-exports-esm
lc_public_repos/langchainjs/environment_tests/test-exports-esm/src/index.ts
import assert from "assert"; import { OpenAI } from "@langchain/openai"; import { LLMChain } from "langchain/chains"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { HuggingFaceTransformersEmbeddings } from "@langchain/community/embeddings/hf_transformers"; import { Document } from "@langchain/core/documents"; async function test(useAzure: boolean = false) { // Test exports assert(typeof OpenAI === "function"); assert(typeof LLMChain === "function"); assert(typeof ChatPromptTemplate === "function"); assert(typeof MemoryVectorStore === "function"); const openAIParameters = useAzure ? { azureOpenAIApiKey: "sk-XXXX", azureOpenAIApiInstanceName: "XXXX", azureOpenAIApiDeploymentName: "XXXX", azureOpenAIApiVersion: "XXXX", } : { openAIApiKey: "sk-XXXX", }; const vs = new MemoryVectorStore(new HuggingFaceTransformersEmbeddings({ model: "Xenova/all-MiniLM-L6-v2", })); await vs.addVectors( [ [0, 1, 0], [0, 0, 1], ], [ new Document({ pageContent: "a", }), new Document({ pageContent: "b", }), ] ); assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1); } test(false) .then(() => console.log("openAI Api success")) .catch((e) => { console.error(e); process.exit(1); }); test(true) .then(() => console.log("Azure openAI Api success")) .catch((e) => { console.error(e); process.exit(1); });
0
lc_public_repos/langchainjs/environment_tests/test-exports-esm
lc_public_repos/langchainjs/environment_tests/test-exports-esm/src/import.cjs
async function test() { const { default: assert } = await import("assert"); const { OpenAI } = await import("@langchain/openai"); const { LLMChain } = await import("langchain/chains"); const { ChatPromptTemplate } = await import("@langchain/core/prompts"); const { MemoryVectorStore } = await import("langchain/vectorstores/memory"); const { HuggingFaceTransformersEmbeddings } = await import("@langchain/community/embeddings/hf_transformers"); const { Document } = await import("@langchain/core/documents"); // Test exports assert(typeof OpenAI === "function"); assert(typeof LLMChain === "function"); assert(typeof ChatPromptTemplate === "function"); assert(typeof MemoryVectorStore === "function"); const vs = new MemoryVectorStore(new HuggingFaceTransformersEmbeddings({ model: "Xenova/all-MiniLM-L6-v2", })); await vs.addVectors( [ [0, 1, 0], [0, 0, 1], ], [ new Document({ pageContent: "a", }), new Document({ pageContent: "b", }), ] ); assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1); } test() .then(() => console.log("success")) .catch((e) => { console.error(e); process.exit(1); });
0
lc_public_repos/langchainjs/environment_tests/test-exports-esm
lc_public_repos/langchainjs/environment_tests/test-exports-esm/src/entrypoints.js
import * as load from "langchain/load"; import * as load_serializable from "langchain/load/serializable"; import * as agents from "langchain/agents"; import * as agents_toolkits from "langchain/agents/toolkits"; import * as agents_format_scratchpad from "langchain/agents/format_scratchpad"; import * as agents_format_scratchpad_openai_tools from "langchain/agents/format_scratchpad/openai_tools"; import * as agents_format_scratchpad_log from "langchain/agents/format_scratchpad/log"; import * as agents_format_scratchpad_xml from "langchain/agents/format_scratchpad/xml"; import * as agents_format_scratchpad_log_to_message from "langchain/agents/format_scratchpad/log_to_message"; import * as agents_react_output_parser from "langchain/agents/react/output_parser"; import * as agents_xml_output_parser from "langchain/agents/xml/output_parser"; import * as agents_openai_output_parser from "langchain/agents/openai/output_parser"; import * as tools from "langchain/tools"; import * as tools_chain from "langchain/tools/chain"; import * as tools_render from "langchain/tools/render"; import * as tools_retriever from "langchain/tools/retriever"; import * as chains from "langchain/chains"; import * as chains_combine_documents from "langchain/chains/combine_documents"; import * as chains_combine_documents_reduce from "langchain/chains/combine_documents/reduce"; import * as chains_history_aware_retriever from "langchain/chains/history_aware_retriever"; import * as chains_openai_functions from "langchain/chains/openai_functions"; import * as chains_retrieval from "langchain/chains/retrieval"; import * as embeddings_cache_backed from "langchain/embeddings/cache_backed"; import * as embeddings_fake from "langchain/embeddings/fake"; import * as vectorstores_memory from "langchain/vectorstores/memory"; import * as text_splitter from "langchain/text_splitter"; import * as memory from "langchain/memory"; import * as memory_chat_memory from "langchain/memory/chat_memory"; import * as 
document from "langchain/document"; import * as document_loaders_base from "langchain/document_loaders/base"; import * as document_transformers_openai_functions from "langchain/document_transformers/openai_functions"; import * as callbacks from "langchain/callbacks"; import * as output_parsers from "langchain/output_parsers"; import * as retrievers_contextual_compression from "langchain/retrievers/contextual_compression"; import * as retrievers_document_compressors from "langchain/retrievers/document_compressors"; import * as retrievers_ensemble from "langchain/retrievers/ensemble"; import * as retrievers_multi_query from "langchain/retrievers/multi_query"; import * as retrievers_multi_vector from "langchain/retrievers/multi_vector"; import * as retrievers_parent_document from "langchain/retrievers/parent_document"; import * as retrievers_time_weighted from "langchain/retrievers/time_weighted"; import * as retrievers_document_compressors_chain_extract from "langchain/retrievers/document_compressors/chain_extract"; import * as retrievers_document_compressors_embeddings_filter from "langchain/retrievers/document_compressors/embeddings_filter"; import * as retrievers_hyde from "langchain/retrievers/hyde"; import * as retrievers_score_threshold from "langchain/retrievers/score_threshold"; import * as retrievers_matryoshka_retriever from "langchain/retrievers/matryoshka_retriever"; import * as stores_doc_base from "langchain/stores/doc/base"; import * as stores_doc_in_memory from "langchain/stores/doc/in_memory"; import * as stores_file_in_memory from "langchain/stores/file/in_memory"; import * as stores_message_in_memory from "langchain/stores/message/in_memory"; import * as storage_encoder_backed from "langchain/storage/encoder_backed"; import * as storage_in_memory from "langchain/storage/in_memory"; import * as util_document from "langchain/util/document"; import * as util_math from "langchain/util/math"; import * as util_time from "langchain/util/time"; import * as 
experimental_autogpt from "langchain/experimental/autogpt"; import * as experimental_openai_assistant from "langchain/experimental/openai_assistant"; import * as experimental_openai_files from "langchain/experimental/openai_files"; import * as experimental_babyagi from "langchain/experimental/babyagi"; import * as experimental_generative_agents from "langchain/experimental/generative_agents"; import * as experimental_plan_and_execute from "langchain/experimental/plan_and_execute"; import * as experimental_chains_violation_of_expectations from "langchain/experimental/chains/violation_of_expectations"; import * as experimental_masking from "langchain/experimental/masking"; import * as experimental_prompts_custom_format from "langchain/experimental/prompts/custom_format"; import * as evaluation from "langchain/evaluation"; import * as smith from "langchain/smith"; import * as runnables_remote from "langchain/runnables/remote"; import * as indexes from "langchain/indexes"; import * as schema_query_constructor from "langchain/schema/query_constructor"; import * as schema_prompt_template from "langchain/schema/prompt_template";
0
lc_public_repos/langchainjs/environment_tests/test-exports-esm
lc_public_repos/langchainjs/environment_tests/test-exports-esm/src/require.cjs
const assert = require("assert");
const { OpenAI } = require("@langchain/openai");
const { LLMChain } = require("langchain/chains");
const { ChatPromptTemplate } = require("@langchain/core/prompts");
const { MemoryVectorStore } = require("langchain/vectorstores/memory");
const {
  HuggingFaceTransformersEmbeddings,
} = require("@langchain/community/embeddings/hf_transformers");
const { Document } = require("@langchain/core/documents");

// Smoke test for the CommonJS `require()` surface of the packages: the
// entrypoints must resolve to constructors, and an in-memory vector store
// must round-trip pre-computed vectors end to end (no network calls).
async function test() {
  // Every entrypoint should surface a constructor function.
  const exportedConstructors = [
    OpenAI,
    LLMChain,
    ChatPromptTemplate,
    MemoryVectorStore,
  ];
  for (const ctor of exportedConstructors) {
    assert(typeof ctor === "function");
  }

  // Local (no-network) transformers embedding model backing the store.
  const embedder = new HuggingFaceTransformersEmbeddings({
    model: "Xenova/all-MiniLM-L6-v2",
  });
  const store = new MemoryVectorStore(embedder);

  // Insert two orthogonal unit vectors with trivial documents...
  await store.addVectors(
    [
      [0, 1, 0],
      [0, 0, 1],
    ],
    [new Document({ pageContent: "a" }), new Document({ pageContent: "b" })]
  );

  // ...and confirm a top-1 similarity query returns exactly one hit.
  const matches = await store.similaritySearchVectorWithScore([0, 0, 1], 1);
  assert(matches.length === 1);
}

test()
  .then(() => console.log("success"))
  .catch((e) => {
    console.error(e);
    process.exit(1);
  });
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-tsc/main.ts
import { ChatOpenAI } from "@langchain/openai";
import { createOpenAIToolsAgent, AgentExecutor } from "langchain/agents";
import { ChatPromptTemplate } from "@langchain/core/prompts";

// Compile-oriented smoke test: wire up an OpenAI tools agent end to end so
// `tsc` type-checks the public API across packages. No network call happens
// here — the key is a placeholder and the agent is never invoked.
const chatModel = new ChatOpenAI({
  openAIApiKey: "sk-XXXX",
});

// Standard agent prompt shape: system preamble, prior turns, user input,
// and the agent scratchpad placeholder.
const agentPrompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a helpful assistant"],
  ["placeholder", "{chat_history}"],
  ["human", "{input}"],
  ["placeholder", "{agent_scratchpad}"],
]);

const toolsAgent = await createOpenAIToolsAgent({
  llm: chatModel,
  prompt: agentPrompt,
  tools: [],
});

const executor = new AgentExecutor({
  agent: toolsAgent,
  tools: [],
});

console.log(executor);
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-tsc/package.json
{ "name": "test-exports-tsc", "version": "0.0.0", "type": "module", "workspaces": [ "libs/*" ], "private": true, "description": "TSC Tests for the things exported by the langchain package", "main": "./index.mjs", "scripts": { "build": "tsc -m nodenext main.ts", "test": "node ./main.js" }, "author": "LangChain", "license": "MIT", "dependencies": { "@langchain/anthropic": "workspace:*", "@langchain/community": "workspace:*", "@langchain/core": "workspace:*", "@langchain/openai": "workspace:*", "langchain": "workspace:*", "typescript": "5.5.4" }, "devDependencies": { "@types/node": "^18.15.11", "prettier": "^2.8.3" } }
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-cjs/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "./dist", "rootDir": "./src", "lib": [ "ES2021", "ES2022.Object", "DOM" ], "target": "ES2021" }, "include": [ "src/**/*" ] }
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-cjs/package.json
{ "name": "test-exports-cjs", "version": "0.0.0", "workspaces": [ "libs/*" ], "private": true, "description": "CJS Tests for the things exported by the langchain package", "main": "./index.mjs", "scripts": { "build": "tsc", "test": "npm run test:esm && npm run test:cjs && npm run test:cjs:import && npm run test:entrypoints && npm run test:ts", "test:esm": "node src/index.mjs", "test:cjs": "node src/require.js", "test:cjs:import": "node src/import.js", "test:entrypoints": "node src/entrypoints.js", "test:ts": "node dist/index.js", "format": "prettier --write src", "format:check": "prettier --check src" }, "author": "LangChain", "license": "MIT", "dependencies": { "@langchain/anthropic": "workspace:*", "@langchain/community": "workspace:*", "@langchain/core": "workspace:*", "@langchain/openai": "workspace:*", "@tsconfig/recommended": "^1.0.2", "@xenova/transformers": "^2.17.2", "langchain": "workspace:*", "typescript": "^5.0.0" }, "devDependencies": { "@types/node": "^18.15.11", "prettier": "^2.8.3" } }
0
lc_public_repos/langchainjs/environment_tests/test-exports-cjs
lc_public_repos/langchainjs/environment_tests/test-exports-cjs/src/import.js
// Smoke test for consuming the packages from CommonJS via dynamic `import()`.
// NOTE: the import order below matches the original on purpose — module
// evaluation order is a visible side effect.
async function test() {
  const { default: assert } = await import("assert");
  const { OpenAI } = await import("@langchain/openai");
  const { LLMChain } = await import("langchain/chains");
  const { ChatPromptTemplate } = await import("@langchain/core/prompts");
  const { HuggingFaceTransformersEmbeddings } = await import(
    "@langchain/community/embeddings/hf_transformers"
  );
  const { Document } = await import("@langchain/core/documents");
  const { MemoryVectorStore } = await import("langchain/vectorstores/memory");

  // Each entrypoint should expose a constructor function.
  for (const ctor of [OpenAI, LLMChain, ChatPromptTemplate, MemoryVectorStore]) {
    assert(typeof ctor === "function");
  }

  // Round-trip two orthogonal vectors through an in-memory store backed by a
  // local (no-network) transformers embedding model.
  const embedder = new HuggingFaceTransformersEmbeddings({
    model: "Xenova/all-MiniLM-L6-v2",
  });
  const store = new MemoryVectorStore(embedder);
  await store.addVectors(
    [
      [0, 1, 0],
      [0, 0, 1],
    ],
    [new Document({ pageContent: "a" }), new Document({ pageContent: "b" })]
  );
  const matches = await store.similaritySearchVectorWithScore([0, 0, 1], 1);
  assert(matches.length === 1);
}

test()
  .then(() => console.log("success"))
  .catch((e) => {
    console.error(e);
    process.exit(1);
  });
0
lc_public_repos/langchainjs/environment_tests/test-exports-cjs
lc_public_repos/langchainjs/environment_tests/test-exports-cjs/src/index.mjs
import assert from "assert"; import { OpenAI } from "@langchain/openai"; import { LLMChain } from "langchain/chains"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { HuggingFaceTransformersEmbeddings } from "@langchain/community/embeddings/hf_transformers"; import { Document } from "@langchain/core/documents"; // Test exports assert(typeof OpenAI === "function"); assert(typeof LLMChain === "function"); assert(typeof ChatPromptTemplate === "function"); assert(typeof MemoryVectorStore === "function"); const vs = new MemoryVectorStore(new HuggingFaceTransformersEmbeddings({ model: "Xenova/all-MiniLM-L6-v2" })); await vs.addVectors( [ [0, 1, 0], [0, 0, 1], ], [ new Document({ pageContent: "a", }), new Document({ pageContent: "b", }), ] ); assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1);
0
lc_public_repos/langchainjs/environment_tests/test-exports-cjs
lc_public_repos/langchainjs/environment_tests/test-exports-cjs/src/index.ts
import assert from "assert"; import { OpenAI } from "@langchain/openai"; import { LLMChain } from "langchain/chains"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { HuggingFaceTransformersEmbeddings } from "@langchain/community/embeddings/hf_transformers"; import { Document } from "@langchain/core/documents"; async function test(useAzure: boolean = false) { // Test exports assert(typeof OpenAI === "function"); assert(typeof LLMChain === "function"); assert(typeof ChatPromptTemplate === "function"); assert(typeof MemoryVectorStore === "function"); // Test dynamic imports of peer dependencies const openAIParameters = useAzure ? { azureOpenAIApiKey: "sk-XXXX", azureOpenAIApiInstanceName: "XXXX", azureOpenAIApiDeploymentName: "XXXX", azureOpenAIApiVersion: "XXXX", } : { openAIApiKey: "sk-XXXX", }; const vs = new MemoryVectorStore(new HuggingFaceTransformersEmbeddings({ model: "Xenova/all-MiniLM-L6-v2" })); await vs.addVectors( [ [0, 1, 0], [0, 0, 1], ], [ new Document({ pageContent: "a", }), new Document({ pageContent: "b", }), ] ); assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1); } test(false) .then(() => console.log("openAI Api success")) .catch((e) => { console.error(e); process.exit(1); }); test(true) .then(() => console.log("azure openAI Api success")) .catch((e) => { console.error(e); process.exit(1); });
0
lc_public_repos/langchainjs/environment_tests/test-exports-cjs
lc_public_repos/langchainjs/environment_tests/test-exports-cjs/src/entrypoints.js
const load = require("langchain/load"); const load_serializable = require("langchain/load/serializable"); const agents = require("langchain/agents"); const agents_toolkits = require("langchain/agents/toolkits"); const agents_format_scratchpad = require("langchain/agents/format_scratchpad"); const agents_format_scratchpad_openai_tools = require("langchain/agents/format_scratchpad/openai_tools"); const agents_format_scratchpad_log = require("langchain/agents/format_scratchpad/log"); const agents_format_scratchpad_xml = require("langchain/agents/format_scratchpad/xml"); const agents_format_scratchpad_log_to_message = require("langchain/agents/format_scratchpad/log_to_message"); const agents_react_output_parser = require("langchain/agents/react/output_parser"); const agents_xml_output_parser = require("langchain/agents/xml/output_parser"); const agents_openai_output_parser = require("langchain/agents/openai/output_parser"); const tools = require("langchain/tools"); const tools_chain = require("langchain/tools/chain"); const tools_render = require("langchain/tools/render"); const tools_retriever = require("langchain/tools/retriever"); const chains = require("langchain/chains"); const chains_combine_documents = require("langchain/chains/combine_documents"); const chains_combine_documents_reduce = require("langchain/chains/combine_documents/reduce"); const chains_history_aware_retriever = require("langchain/chains/history_aware_retriever"); const chains_openai_functions = require("langchain/chains/openai_functions"); const chains_retrieval = require("langchain/chains/retrieval"); const embeddings_cache_backed = require("langchain/embeddings/cache_backed"); const embeddings_fake = require("langchain/embeddings/fake"); const vectorstores_memory = require("langchain/vectorstores/memory"); const text_splitter = require("langchain/text_splitter"); const memory = require("langchain/memory"); const memory_chat_memory = require("langchain/memory/chat_memory"); const document = 
require("langchain/document"); const document_loaders_base = require("langchain/document_loaders/base"); const document_transformers_openai_functions = require("langchain/document_transformers/openai_functions"); const callbacks = require("langchain/callbacks"); const output_parsers = require("langchain/output_parsers"); const retrievers_contextual_compression = require("langchain/retrievers/contextual_compression"); const retrievers_document_compressors = require("langchain/retrievers/document_compressors"); const retrievers_ensemble = require("langchain/retrievers/ensemble"); const retrievers_multi_query = require("langchain/retrievers/multi_query"); const retrievers_multi_vector = require("langchain/retrievers/multi_vector"); const retrievers_parent_document = require("langchain/retrievers/parent_document"); const retrievers_time_weighted = require("langchain/retrievers/time_weighted"); const retrievers_document_compressors_chain_extract = require("langchain/retrievers/document_compressors/chain_extract"); const retrievers_document_compressors_embeddings_filter = require("langchain/retrievers/document_compressors/embeddings_filter"); const retrievers_hyde = require("langchain/retrievers/hyde"); const retrievers_score_threshold = require("langchain/retrievers/score_threshold"); const retrievers_matryoshka_retriever = require("langchain/retrievers/matryoshka_retriever"); const stores_doc_base = require("langchain/stores/doc/base"); const stores_doc_in_memory = require("langchain/stores/doc/in_memory"); const stores_file_in_memory = require("langchain/stores/file/in_memory"); const stores_message_in_memory = require("langchain/stores/message/in_memory"); const storage_encoder_backed = require("langchain/storage/encoder_backed"); const storage_in_memory = require("langchain/storage/in_memory"); const util_document = require("langchain/util/document"); const util_math = require("langchain/util/math"); const util_time = require("langchain/util/time"); const 
experimental_autogpt = require("langchain/experimental/autogpt"); const experimental_openai_assistant = require("langchain/experimental/openai_assistant"); const experimental_openai_files = require("langchain/experimental/openai_files"); const experimental_babyagi = require("langchain/experimental/babyagi"); const experimental_generative_agents = require("langchain/experimental/generative_agents"); const experimental_plan_and_execute = require("langchain/experimental/plan_and_execute"); const experimental_chains_violation_of_expectations = require("langchain/experimental/chains/violation_of_expectations"); const experimental_masking = require("langchain/experimental/masking"); const experimental_prompts_custom_format = require("langchain/experimental/prompts/custom_format"); const evaluation = require("langchain/evaluation"); const smith = require("langchain/smith"); const runnables_remote = require("langchain/runnables/remote"); const indexes = require("langchain/indexes"); const schema_query_constructor = require("langchain/schema/query_constructor"); const schema_prompt_template = require("langchain/schema/prompt_template");
0
lc_public_repos/langchainjs/environment_tests/test-exports-cjs
lc_public_repos/langchainjs/environment_tests/test-exports-cjs/src/require.js
// CJS smoke test: verify the packages resolve under `require()` and that a
// MemoryVectorStore round-trips pre-computed vectors (no network calls).
const assert = require("assert");
const { OpenAI } = require("@langchain/openai");
const { LLMChain } = require("langchain/chains");
const { ChatPromptTemplate } = require("@langchain/core/prompts");
const { MemoryVectorStore } = require("langchain/vectorstores/memory");
const { HuggingFaceTransformersEmbeddings } = require("@langchain/community/embeddings/hf_transformers");
const { Document } = require("@langchain/core/documents");

async function test() {
  // Test exports — each entrypoint should resolve to a constructor, proving
  // the package export maps work for CommonJS consumers.
  assert(typeof OpenAI === "function");
  assert(typeof LLMChain === "function");
  assert(typeof ChatPromptTemplate === "function");
  assert(typeof MemoryVectorStore === "function");

  // Store backed by a local (no-network) transformers embedding model.
  const vs = new MemoryVectorStore(new HuggingFaceTransformersEmbeddings({ model: "Xenova/all-MiniLM-L6-v2" }));

  // Insert two orthogonal unit vectors paired with trivial documents...
  await vs.addVectors(
    [
      [0, 1, 0],
      [0, 0, 1],
    ],
    [
      new Document({
        pageContent: "a",
      }),
      new Document({
        pageContent: "b",
      }),
    ]
  );

  // ...then confirm a top-1 similarity query returns exactly one hit.
  assert((await vs.similaritySearchVectorWithScore([0, 0, 1], 1)).length === 1);
}

test()
  .then(() => console.log("success"))
  .catch((e) => {
    console.error(e);
    process.exit(1);
  });
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-cf/tsconfig.json
{ "compilerOptions": { /* Visit https://aka.ms/tsconfig.json to read more about this file */ /* Projects */ // "incremental": true, /* Enable incremental compilation */ // "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */ // "tsBuildInfoFile": "./", /* Specify the folder for .tsbuildinfo incremental compilation files. */ // "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects */ // "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */ // "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */ /* Language and Environment */ "target": "es2021" /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */, "lib": [ "es2021" ] /* Specify a set of bundled library declaration files that describe the target runtime environment. */, "jsx": "react" /* Specify what JSX code is generated. */, // "experimentalDecorators": true, /* Enable experimental support for TC39 stage 2 draft decorators. */ // "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */ // "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h' */ // "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */ // "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using `jsx: react-jsx*`.` */ // "reactNamespace": "", /* Specify the object invoked for `createElement`. This only applies when targeting `react` JSX emit. */ // "noLib": true, /* Disable including any library files, including the default lib.d.ts. 
*/ // "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */ /* Modules */ "module": "es2022" /* Specify what module code is generated. */, // "rootDir": "./", /* Specify the root folder within your source files. */ "moduleResolution": "node" /* Specify how TypeScript looks up a file from a given module specifier. */, // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */ // "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */ // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */ // "typeRoots": [], /* Specify multiple folders that act like `./node_modules/@types`. */ "types": [ "@cloudflare/workers-types" ] /* Specify type package names to be included without being referenced in a source file. */, // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */ "resolveJsonModule": true /* Enable importing .json files */, // "noResolve": true, /* Disallow `import`s, `require`s or `<reference>`s from expanding the number of files TypeScript should add to a project. */ /* JavaScript Support */ "allowJs": true /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */, "checkJs": false /* Enable error reporting in type-checked JavaScript files. */, // "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from `node_modules`. Only applicable with `allowJs`. */ /* Emit */ // "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */ // "declarationMap": true, /* Create sourcemaps for d.ts files. */ // "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */ // "sourceMap": true, /* Create source map files for emitted JavaScript files. */ // "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. 
If `declaration` is true, also designates a file that bundles all .d.ts output. */ // "outDir": "./", /* Specify an output folder for all emitted files. */ // "removeComments": true, /* Disable emitting comments. */ "noEmit": true /* Disable emitting files from a compilation. */, // "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */ // "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types */ // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */ // "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */ // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ // "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */ // "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */ // "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */ // "newLine": "crlf", /* Set the newline character for emitting files. */ // "stripInternal": true, /* Disable emitting declarations that have `@internal` in their JSDoc comments. */ // "noEmitHelpers": true, /* Disable generating custom helper functions like `__extends` in compiled output. */ // "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */ // "preserveConstEnums": true, /* Disable erasing `const enum` declarations in generated code. */ // "declarationDir": "./", /* Specify the output directory for generated declaration files. */ // "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */ /* Interop Constraints */ "isolatedModules": true /* Ensure that each file can be safely transpiled without relying on other imports. 
*/, "allowSyntheticDefaultImports": true /* Allow 'import x from y' when a module doesn't have a default export. */, // "esModuleInterop": true /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables `allowSyntheticDefaultImports` for type compatibility. */, // "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */ "forceConsistentCasingInFileNames": true /* Ensure that casing is correct in imports. */, /* Type Checking */ "strict": true /* Enable all strict type-checking options. */, // "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied `any` type.. */ // "strictNullChecks": true, /* When type checking, take into account `null` and `undefined`. */ // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */ // "strictBindCallApply": true, /* Check that the arguments for `bind`, `call`, and `apply` methods match the original function. */ // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */ // "noImplicitThis": true, /* Enable error reporting when `this` is given the type `any`. */ // "useUnknownInCatchVariables": true, /* Type catch clause variables as 'unknown' instead of 'any'. */ // "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */ // "noUnusedLocals": true, /* Enable error reporting when a local variables aren't read. */ // "noUnusedParameters": true, /* Raise an error when a function parameter isn't read */ // "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */ // "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */ // "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. 
*/ // "noUncheckedIndexedAccess": true, /* Include 'undefined' in index signature results */ // "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */ // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type */ // "allowUnusedLabels": true, /* Disable error reporting for unused labels. */ // "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */ /* Completeness */ // "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */ "skipLibCheck": true /* Skip type checking all .d.ts files. */ } }
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-cf/README.md
# test-exports-cf This package was generated with `wrangler init` with the purpose of testing compatibility with Cloudflare Workers.
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-cf/wrangler.toml
name = "test-exports-cf" main = "src/index.ts" compatibility_date = "2024-01-10"
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-cf/package.json
{ "name": "test-exports-cf", "version": "0.0.0", "workspaces": [ "libs/*" ], "devDependencies": { "@cloudflare/workers-types": "^4.20230321.0" }, "dependencies": { "@langchain/anthropic": "workspace:*", "@langchain/core": "workspace:*", "@langchain/openai": "workspace:*", "@tsconfig/recommended": "^1.0.2", "langchain": "workspace:*", "wrangler": "^3.19.0", "vitest": "0.34.3", "typescript": "^5.0.3" }, "private": true, "scripts": { "start": "wrangler dev", "deploy": "wrangler deploy", "build": "wrangler deploy --dry-run --outdir=dist", "test": "vitest run **/*.unit.test.ts", "test:integration": "vitest run **/*.int.test.ts" } }
0
lc_public_repos/langchainjs/environment_tests/test-exports-cf
lc_public_repos/langchainjs/environment_tests/test-exports-cf/src/index.int.test.ts
import { unstable_dev } from "wrangler";
import type { UnstableDevWorker } from "wrangler";
import { describe, expect, it, beforeAll, afterAll } from "vitest";

// Integration test: boot the worker with wrangler's local dev server and hit
// its fetch handler over HTTP.
describe("Worker", () => {
  let worker: UnstableDevWorker;

  beforeAll(async () => {
    // Spin up the worker from source; the experimental flag silences
    // wrangler's unstable-API warning in CI logs.
    worker = await unstable_dev("src/index.ts", {
      experimental: { disableExperimentalWarning: true },
    });
  });

  afterAll(async () => {
    await worker.stop();
  });

  // 30s timeout: the first request may trigger a cold compile of the worker
  // bundle before the handler responds.
  it("should return Hello World", async () => {
    const resp = await worker.fetch();
    expect(resp.ok).toBe(true);
    if (resp) {
      const text = await resp.text();
      expect(text.startsWith("Hello")).toBe(true);
    }
  }, 30000);
});
0
lc_public_repos/langchainjs/environment_tests/test-exports-cf
lc_public_repos/langchainjs/environment_tests/test-exports-cf/src/index.unit.test.ts
import { unstable_dev } from "wrangler";
import type { UnstableDevWorker } from "wrangler";
import { describe, expect, it, beforeAll, afterAll } from "vitest";

// Unit-level smoke test: the real check is that `beforeAll` can bundle and
// boot the worker at all; the `it` body is a trivial placeholder.
describe("Worker", () => {
  let worker: UnstableDevWorker;

  // 30s timeout on setup: bundling the worker from source can be slow in CI.
  beforeAll(async () => {
    worker = await unstable_dev("src/index.ts", {
      experimental: { disableExperimentalWarning: true },
    });
  }, 30000);

  afterAll(async () => {
    await worker.stop();
  });

  it("should start", async () => {
    expect(true).toBe(true);
  });
});
0
lc_public_repos/langchainjs/environment_tests/test-exports-cf
lc_public_repos/langchainjs/environment_tests/test-exports-cf/src/index.ts
/** * Welcome to Cloudflare Workers! This is your first worker. * * - Run `wrangler dev src/index.ts` in your terminal to start a development server * - Open a browser tab at http://localhost:8787/ to see your worker in action * - Run `wrangler deploy src/index.ts --name my-worker` to publish your worker * * Learn more at https://developers.cloudflare.com/workers/ */ // import all entrypoints to test, do not do this in your own app import "./entrypoints.js"; // Import a few things we'll use to test the exports import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate, HumanMessagePromptTemplate, } from "@langchain/core/prompts"; import { OpenAI } from "@langchain/openai"; import { OpenAIEmbeddings } from "@langchain/openai"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; export interface Env { OPENAI_API_KEY?: string; AZURE_OPENAI_API_KEY?: string; AZURE_OPENAI_API_INSTANCE_NAME?: string; AZURE_OPENAI_API_DEPLOYMENT_NAME?: string; AZURE_OPENAI_API_VERSION?: string; } export default { async fetch( request: Request, env: Env, ctx: ExecutionContext ): Promise<Response> { const constructorParameters = env.AZURE_OPENAI_API_KEY ? 
{ azureOpenAIApiKey: env.AZURE_OPENAI_API_KEY, azureOpenAIApiInstanceName: env.AZURE_OPENAI_API_INSTANCE_NAME, azureOpenAIApiDeploymentName: env.AZURE_OPENAI_API_DEPLOYMENT_NAME, azureOpenAIApiVersion: env.AZURE_OPENAI_API_VERSION, } : { openAIApiKey: env.OPENAI_API_KEY, }; // Intantiate a few things to test the exports new OpenAI(constructorParameters); const emb = new OpenAIEmbeddings(constructorParameters); // Test a chain + prompt + model const prompt = ChatPromptTemplate.fromMessages([ HumanMessagePromptTemplate.fromTemplate("{input}"), ]); const llm = new ChatOpenAI(constructorParameters); const chain = prompt.pipe(llm).pipe(new StringOutputParser()); const res = await chain.invoke("hello"); return new Response( `Hello, from Cloudflare Worker at ${request.url}. Assistant says: ${res}` ); }, };
0
lc_public_repos/langchainjs/environment_tests/test-exports-cf
lc_public_repos/langchainjs/environment_tests/test-exports-cf/src/entrypoints.js
// Smoke-test module: re-exports every public "langchain" entrypoint so that
// bundling this Cloudflare Worker verifies each module resolves and loads in
// the Workers runtime. Do not import everything like this in a real app.
export * from "langchain/load"; export * from "langchain/load/serializable"; export * from "langchain/agents"; export * from "langchain/agents/toolkits"; export * from "langchain/agents/format_scratchpad"; export * from "langchain/agents/format_scratchpad/openai_tools"; export * from "langchain/agents/format_scratchpad/log"; export * from "langchain/agents/format_scratchpad/xml"; export * from "langchain/agents/format_scratchpad/log_to_message"; export * from "langchain/agents/react/output_parser"; export * from "langchain/agents/xml/output_parser"; export * from "langchain/agents/openai/output_parser"; export * from "langchain/tools"; export * from "langchain/tools/chain"; export * from "langchain/tools/render"; export * from "langchain/tools/retriever"; export * from "langchain/chains"; export * from "langchain/chains/combine_documents"; export * from "langchain/chains/combine_documents/reduce"; export * from "langchain/chains/history_aware_retriever"; export * from "langchain/chains/openai_functions"; export * from "langchain/chains/retrieval"; export * from "langchain/embeddings/cache_backed"; export * from "langchain/embeddings/fake"; export * from "langchain/vectorstores/memory"; export * from "langchain/text_splitter"; export * from "langchain/memory"; export * from "langchain/memory/chat_memory"; export * from "langchain/document"; export * from "langchain/document_loaders/base"; export * from "langchain/document_transformers/openai_functions"; export * from "langchain/callbacks"; export * from "langchain/output_parsers"; export * from "langchain/retrievers/contextual_compression"; export * from "langchain/retrievers/document_compressors"; export * from "langchain/retrievers/ensemble"; export * from "langchain/retrievers/multi_query"; export * from "langchain/retrievers/multi_vector"; export * from "langchain/retrievers/parent_document"; export * from "langchain/retrievers/time_weighted"; export * from
"langchain/retrievers/document_compressors/chain_extract"; export * from "langchain/retrievers/document_compressors/embeddings_filter"; export * from "langchain/retrievers/hyde"; export * from "langchain/retrievers/score_threshold"; export * from "langchain/retrievers/matryoshka_retriever"; export * from "langchain/stores/doc/base"; export * from "langchain/stores/doc/in_memory"; export * from "langchain/stores/file/in_memory"; export * from "langchain/stores/message/in_memory"; export * from "langchain/storage/encoder_backed"; export * from "langchain/storage/in_memory"; export * from "langchain/util/document"; export * from "langchain/util/math"; export * from "langchain/util/time"; export * from "langchain/experimental/autogpt"; export * from "langchain/experimental/openai_assistant"; export * from "langchain/experimental/openai_files"; export * from "langchain/experimental/babyagi"; export * from "langchain/experimental/generative_agents"; export * from "langchain/experimental/plan_and_execute"; export * from "langchain/experimental/chains/violation_of_expectations"; export * from "langchain/experimental/masking"; export * from "langchain/experimental/prompts/custom_format"; export * from "langchain/evaluation"; export * from "langchain/smith"; export * from "langchain/runnables/remote"; export * from "langchain/indexes"; export * from "langchain/schema/query_constructor"; export * from "langchain/schema/prompt_template";
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-vite/index.html
<!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8" /> <link rel="icon" type="image/svg+xml" href="/vite.svg" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <title>Vite + TS</title> </head> <body> <div id="app"></div> <script type="module" src="/src/main.ts"></script> </body> </html>
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-vite/tsconfig.json
{ "compilerOptions": { "target": "ESNext", "useDefineForClassFields": true, "module": "ESNext", "lib": ["ESNext", "DOM"], "moduleResolution": "Node", "strict": true, "resolveJsonModule": true, "isolatedModules": true, "esModuleInterop": true, "noEmit": true, "noUnusedLocals": true, "noUnusedParameters": true, "noImplicitReturns": true, "skipLibCheck": true }, "include": ["src"] }
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/test-exports-vite/package.json
{ "name": "test-exports-vite", "version": "0.0.0", "workspaces": [ "libs/*" ], "private": true, "type": "module", "scripts": { "dev": "vite", "build": "vite build", "preview": "vite preview", "test": "tsc" }, "dependencies": { "@langchain/anthropic": "workspace:*", "@langchain/community": "workspace:*", "@langchain/core": "workspace:*", "@langchain/openai": "workspace:*", "langchain": "workspace:*", "typescript": "^5.0.0", "vite": "^4.2.0" } }
0
lc_public_repos/langchainjs/environment_tests/test-exports-vite
lc_public_repos/langchainjs/environment_tests/test-exports-vite/src/typescript.svg
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="32" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path fill="#007ACC" d="M0 128v128h256V0H0z"></path><path fill="#FFF" d="m56.612 128.85l-.081 10.483h33.32v94.68h23.568v-94.68h33.321v-10.28c0-5.69-.122-10.444-.284-10.566c-.122-.162-20.4-.244-44.983-.203l-44.74.122l-.121 10.443Zm149.955-10.742c6.501 1.625 11.459 4.51 16.01 9.224c2.357 2.52 5.851 7.111 6.136 8.208c.08.325-11.053 7.802-17.798 11.988c-.244.162-1.22-.894-2.317-2.52c-3.291-4.795-6.745-6.867-12.028-7.233c-7.76-.528-12.759 3.535-12.718 10.321c0 1.992.284 3.17 1.097 4.795c1.707 3.536 4.876 5.649 14.832 9.956c18.326 7.883 26.168 13.084 31.045 20.48c5.445 8.249 6.664 21.415 2.966 31.208c-4.063 10.646-14.14 17.879-28.323 20.276c-4.388.772-14.79.65-19.504-.203c-10.28-1.828-20.033-6.908-26.047-13.572c-2.357-2.6-6.949-9.387-6.664-9.874c.122-.163 1.178-.813 2.356-1.504c1.138-.65 5.446-3.129 9.509-5.485l7.355-4.267l1.544 2.276c2.154 3.29 6.867 7.801 9.712 9.305c8.167 4.307 19.383 3.698 24.909-1.26c2.357-2.153 3.332-4.388 3.332-7.68c0-2.966-.366-4.266-1.91-6.501c-1.99-2.845-6.054-5.242-17.595-10.24c-13.206-5.69-18.895-9.224-24.096-14.832c-3.007-3.25-5.852-8.452-7.03-12.8c-.975-3.617-1.22-12.678-.447-16.335c2.723-12.76 12.353-21.659 26.25-24.3c4.51-.853 14.994-.528 19.424.569Z"></path></svg>
0
lc_public_repos/langchainjs/environment_tests/test-exports-vite
lc_public_repos/langchainjs/environment_tests/test-exports-vite/src/chain.ts
// Import a few things we'll use to test the exports import { LLMChain } from "langchain/chains"; import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate, HumanMessagePromptTemplate, } from "@langchain/core/prompts"; import { CallbackManager } from "@langchain/core/callbacks/manager"; export function setupChain(element: HTMLButtonElement) { const runChain = async () => { const llm = new ChatOpenAI({ // Don't do this in your app, it would leak your API key openAIApiKey: import.meta.env.VITE_OPENAI_API_KEY, streaming: true, callbackManager: CallbackManager.fromHandlers({ handleLLMNewToken: async (token) => console.log("handleLLMNewToken", token), }), }); // Test count tokens const n = await llm.getNumTokens("Hello"); console.log("getNumTokens", n); // Test a chain + prompt + model const chain = new LLMChain({ llm, prompt: ChatPromptTemplate.fromMessages([ HumanMessagePromptTemplate.fromTemplate("{input}"), ]), }); const res = await chain.run("hello"); console.log("runChain", res); }; element.addEventListener("click", runChain); }
0
lc_public_repos/langchainjs/environment_tests/test-exports-vite
lc_public_repos/langchainjs/environment_tests/test-exports-vite/src/style.css
:root { font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif; line-height: 1.5; font-weight: 400; color-scheme: light dark; color: rgba(255, 255, 255, 0.87); background-color: #242424; font-synthesis: none; text-rendering: optimizeLegibility; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; -webkit-text-size-adjust: 100%; } a { font-weight: 500; color: #646cff; text-decoration: inherit; } a:hover { color: #535bf2; } body { margin: 0; display: flex; place-items: center; min-width: 320px; min-height: 100vh; } h1 { font-size: 3.2em; line-height: 1.1; } #app { max-width: 1280px; margin: 0 auto; padding: 2rem; text-align: center; } .logo { height: 6em; padding: 1.5em; will-change: filter; transition: filter 300ms; } .logo:hover { filter: drop-shadow(0 0 2em #646cffaa); } .logo.vanilla:hover { filter: drop-shadow(0 0 2em #3178c6aa); } .card { padding: 2em; } .read-the-docs { color: #888; } button { border-radius: 8px; border: 1px solid transparent; padding: 0.6em 1.2em; font-size: 1em; font-weight: 500; font-family: inherit; background-color: #1a1a1a; cursor: pointer; transition: border-color 0.25s; } button:hover { border-color: #646cff; } button:focus, button:focus-visible { outline: 4px auto -webkit-focus-ring-color; } @media (prefers-color-scheme: light) { :root { color: #213547; background-color: #ffffff; } a:hover { color: #747bff; } button { background-color: #f9f9f9; } }
0
lc_public_repos/langchainjs/environment_tests/test-exports-vite
lc_public_repos/langchainjs/environment_tests/test-exports-vite/src/vite-env.d.ts
/// <reference types="vite/client" />
0
lc_public_repos/langchainjs/environment_tests/test-exports-vite
lc_public_repos/langchainjs/environment_tests/test-exports-vite/src/main.ts
// import all entrypoints to test, do not do this in your own app import "./entrypoints.js"; import "./style.css"; import typescriptLogo from "./typescript.svg"; import viteLogo from "/vite.svg"; import { setupChain } from "./chain"; document.querySelector<HTMLDivElement>("#app")!.innerHTML = ` <div> <a href="https://vitejs.dev" target="_blank"> <img src="${viteLogo}" class="logo" alt="Vite logo" /> </a> <a href="https://www.typescriptlang.org/" target="_blank"> <img src="${typescriptLogo}" class="logo vanilla" alt="TypeScript logo" /> </a> <h1>Vite + TypeScript</h1> <div class="card"> <button id="chain" type="button">click to run chain</button> </div> <p class="read-the-docs"> Click on the Vite and TypeScript logos to learn more </p> </div> `; setupChain(document.querySelector<HTMLButtonElement>("#chain")!);
0
lc_public_repos/langchainjs/environment_tests/test-exports-vite
lc_public_repos/langchainjs/environment_tests/test-exports-vite/src/entrypoints.js
// Smoke-test module: re-exports every public "langchain" entrypoint so that
// building this app with Vite verifies each module resolves and bundles in a
// browser environment. Do not import everything like this in a real app.
export * from "langchain/load"; export * from "langchain/load/serializable"; export * from "langchain/agents"; export * from "langchain/agents/toolkits"; export * from "langchain/agents/format_scratchpad"; export * from "langchain/agents/format_scratchpad/openai_tools"; export * from "langchain/agents/format_scratchpad/log"; export * from "langchain/agents/format_scratchpad/xml"; export * from "langchain/agents/format_scratchpad/log_to_message"; export * from "langchain/agents/react/output_parser"; export * from "langchain/agents/xml/output_parser"; export * from "langchain/agents/openai/output_parser"; export * from "langchain/tools"; export * from "langchain/tools/chain"; export * from "langchain/tools/render"; export * from "langchain/tools/retriever"; export * from "langchain/chains"; export * from "langchain/chains/combine_documents"; export * from "langchain/chains/combine_documents/reduce"; export * from "langchain/chains/history_aware_retriever"; export * from "langchain/chains/openai_functions"; export * from "langchain/chains/retrieval"; export * from "langchain/embeddings/cache_backed"; export * from "langchain/embeddings/fake"; export * from "langchain/vectorstores/memory"; export * from "langchain/text_splitter"; export * from "langchain/memory"; export * from "langchain/memory/chat_memory"; export * from "langchain/document"; export * from "langchain/document_loaders/base"; export * from "langchain/document_transformers/openai_functions"; export * from "langchain/callbacks"; export * from "langchain/output_parsers"; export * from "langchain/retrievers/contextual_compression"; export * from "langchain/retrievers/document_compressors"; export * from "langchain/retrievers/ensemble"; export * from "langchain/retrievers/multi_query"; export * from "langchain/retrievers/multi_vector"; export * from "langchain/retrievers/parent_document"; export * from "langchain/retrievers/time_weighted"; export * from
"langchain/retrievers/document_compressors/chain_extract"; export * from "langchain/retrievers/document_compressors/embeddings_filter"; export * from "langchain/retrievers/hyde"; export * from "langchain/retrievers/score_threshold"; export * from "langchain/retrievers/matryoshka_retriever"; export * from "langchain/stores/doc/base"; export * from "langchain/stores/doc/in_memory"; export * from "langchain/stores/file/in_memory"; export * from "langchain/stores/message/in_memory"; export * from "langchain/storage/encoder_backed"; export * from "langchain/storage/in_memory"; export * from "langchain/util/document"; export * from "langchain/util/math"; export * from "langchain/util/time"; export * from "langchain/experimental/autogpt"; export * from "langchain/experimental/openai_assistant"; export * from "langchain/experimental/openai_files"; export * from "langchain/experimental/babyagi"; export * from "langchain/experimental/generative_agents"; export * from "langchain/experimental/plan_and_execute"; export * from "langchain/experimental/chains/violation_of_expectations"; export * from "langchain/experimental/masking"; export * from "langchain/experimental/prompts/custom_format"; export * from "langchain/evaluation"; export * from "langchain/smith"; export * from "langchain/runnables/remote"; export * from "langchain/indexes"; export * from "langchain/schema/query_constructor"; export * from "langchain/schema/prompt_template";
0
lc_public_repos/langchainjs/environment_tests/test-exports-vite
lc_public_repos/langchainjs/environment_tests/test-exports-vite/public/vite.svg
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="31.88" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 257"><defs><linearGradient id="IconifyId1813088fe1fbc01fb466" x1="-.828%" x2="57.636%" y1="7.652%" y2="78.411%"><stop offset="0%" stop-color="#41D1FF"></stop><stop offset="100%" stop-color="#BD34FE"></stop></linearGradient><linearGradient id="IconifyId1813088fe1fbc01fb467" x1="43.376%" x2="50.316%" y1="2.242%" y2="89.03%"><stop offset="0%" stop-color="#FFEA83"></stop><stop offset="8.333%" stop-color="#FFDD35"></stop><stop offset="100%" stop-color="#FFA800"></stop></linearGradient></defs><path fill="url(#IconifyId1813088fe1fbc01fb466)" d="M255.153 37.938L134.897 252.976c-2.483 4.44-8.862 4.466-11.382.048L.875 37.958c-2.746-4.814 1.371-10.646 6.827-9.67l120.385 21.517a6.537 6.537 0 0 0 2.322-.004l117.867-21.483c5.438-.991 9.574 4.796 6.877 9.62Z"></path><path fill="url(#IconifyId1813088fe1fbc01fb467)" d="M185.432.063L96.44 17.501a3.268 3.268 0 0 0-2.634 3.014l-5.474 92.456a3.268 3.268 0 0 0 3.997 3.378l24.777-5.718c2.318-.535 4.413 1.507 3.936 3.838l-7.361 36.047c-.495 2.426 1.782 4.5 4.151 3.78l15.304-4.649c2.372-.72 4.652 1.36 4.15 3.788l-11.698 56.621c-.732 3.542 3.979 5.473 5.943 2.437l1.313-2.028l72.516-144.72c1.215-2.423-.88-5.186-3.54-4.672l-25.505 4.922c-2.396.462-4.435-1.77-3.759-4.114l16.646-57.705c.677-2.35-1.37-4.583-3.769-4.113Z"></path></svg>
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/scripts/docker-ci-entrypoint.sh
#!/usr/bin/env bash set -euxo pipefail export CI=true # enable extended globbing for omitting build artifacts shopt -s extglob # avoid copying build artifacts from the host cp -r ../package/!(node_modules|dist|dist-cjs|dist-esm|build|.next|.turbo) . mkdir -p ./libs/langchain-core/ mkdir -p ./libs/langchain-openai/ mkdir -p ./libs/langchain-anthropic/ mkdir -p ./libs/langchain-community/ mkdir -p ./libs/langchain-cohere/ mkdir -p ./libs/langchain/ cp -r ../langchain-core/!(node_modules) ./libs/langchain-core cp -r ../langchain-openai/!(node_modules) ./libs/langchain-openai cp -r ../langchain-anthropic/!(node_modules) ./libs/langchain-anthropic cp -r ../langchain-community/!(node_modules) ./libs/langchain-community cp -r ../langchain-cohere/!(node_modules) ./libs/langchain-cohere cp -r ../langchain/!(node_modules) ./libs/langchain # copy cache mkdir -p ./.yarn cp -r ../root/.yarn/!(berry|cache) ./.yarn cp ../root/yarn.lock ../root/.yarnrc.yml . yarn plugin import workspace-tools yarn workspaces focus --production # Check the build command completes successfully yarn build # Check the test command completes successfully yarn test
0
lc_public_repos/langchainjs/environment_tests
lc_public_repos/langchainjs/environment_tests/scripts/docker-bun-ci-entrypoint.sh
#!/usr/bin/env bash set -euxo pipefail export CI=true # enable extended globbing for omitting build artifacts shopt -s extglob # avoid copying build artifacts from the host cp -r ../package/!(node_modules|dist|dist-cjs|dist-esm|build|.next|.turbo) . mkdir ../langchain-core cp -r ../langchain-core-workspace/!(node_modules|build|.next|.turbo) ../langchain-core mkdir ../langchain cp -r ../langchain-workspace/!(node_modules|build|.next|.turbo) ../langchain # Link the package locally cd ../langchain-core bun link # Reinstall deps with bun because bun doesn't install deps of linked deps bun install --no-save # Link the package locally cd ../langchain sed -i 's/"@langchain\/core": "[^\"]*"/"@langchain\/core": "link:@langchain\/core"/g' package.json bun link # Reinstall deps with bun because bun doesn't install deps of linked deps bun install --no-save cd ../app # Replace the workspace dependency with the local copy, and install all others sed -i 's/"@langchain\/core": "workspace:\*"/"@langchain\/core": "link:@langchain\/core"/g' package.json sed -i 's/"langchain": "workspace:\*"/"langchain": "link:langchain"/g' package.json bun install --no-save # Check the build command completes successfully bun run build # Check the test command completes successfully bun run test
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/tsconfig.json
{ "extends": "@tsconfig/recommended", "compilerOptions": { "outDir": "../dist", "rootDir": "./src", "target": "ES2021", "lib": [ "ES2021", "ES2022.Object", "DOM" ], "module": "ES2020", "moduleResolution": "nodenext", "esModuleInterop": true, "declaration": true, "noImplicitReturns": true, "noFallthroughCasesInSwitch": true, "noUnusedLocals": true, "noUnusedParameters": true, "useDefineForClassFields": true, "strictPropertyInitialization": false, "allowJs": true, "strict": true }, "include": [ "src/**/*" ], "exclude": [ "node_modules", "dist", "docs" ] }
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/LICENSE
The MIT License Copyright (c) Harrison Chase Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/jest.config.cjs
/** @type {import('ts-jest').JestConfigWithTsJest} */ module.exports = { preset: "ts-jest/presets/default-esm", testEnvironment: "./jest.env.cjs", modulePathIgnorePatterns: ["dist/", "docs/"], moduleNameMapper: { "^(\\.{1,2}/.*)\\.js$": "$1", }, transform: { '^.+\\.tsx?$': ['@swc/jest'], }, transformIgnorePatterns: [ "/node_modules/", "\\.pnp\\.[^\\/]+$", "./scripts/jest-setup-after-env.js", ], setupFiles: ["dotenv/config"], setupFilesAfterEnv: ["./scripts/jest-setup-after-env.js"], testTimeout: 20_000, };
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/babel.config.cjs
// babel.config.js module.exports = { presets: [["@babel/preset-env", { targets: { node: true } }]], };
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/jest.env.cjs
const { TestEnvironment } = require("jest-environment-node"); class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment { constructor(config, context) { // Make `instanceof Float32Array` return true in tests // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549 super(config, context); this.global.Float32Array = Float32Array; } } module.exports = AdjustedTestEnvironmentToSupportFloat32Array;
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/README.md
# 🦜🍎️ @langchain/core [![CI](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml/badge.svg)](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml) ![npm](https://img.shields.io/npm/dm/@langchain/core) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai) `@langchain/core` contains the core abstractions and schemas of LangChain.js, including base classes for language models, chat models, vectorstores, retrievers, and runnables. ## 💾 Quick Install ```bash $ yarn add @langchain/core ``` ## 🤔 What is this? `@langchain/core` contains the base abstractions that power the rest of the LangChain ecosystem. These abstractions are designed to be as modular and simple as possible. Examples of these abstractions include those for language models, document loaders, embedding models, vectorstores, retrievers, and more. The benefit of having these abstractions is that any provider can implement the required interface and then easily be used in the rest of the LangChain ecosystem. 
For example, you can install other provider-specific packages like this: ```bash $ yarn add @langchain/openai ``` And use them as follows: ```typescript import { StringOutputParser } from "@langchain/core/output_parsers"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { ChatOpenAI } from "@langchain/openai"; const prompt = ChatPromptTemplate.fromTemplate( `Answer the following question to the best of your ability:\n{question}` ); const model = new ChatOpenAI({ temperature: 0.8, }); const outputParser = new StringOutputParser(); const chain = prompt.pipe(model).pipe(outputParser); const stream = await chain.stream({ question: "Why is the sky blue?", }); for await (const chunk of stream) { console.log(chunk); } /* The sky appears blue because of a phenomenon known as Ray leigh scattering */ ``` Note that for compatibility, all used LangChain packages (including the base LangChain package, which itself depends on core!) must share the same version of `@langchain/core`. This means that you may need to install/resolve a specific version of `@langchain/core` that matches the dependencies of your used packages. ## 🔗 What is LangChain Expression Language? LangChain Core also contains LangChain Expression Language, or LCEL, a runtime that allows users to compose arbitrary sequences together and get several benefits that are important when building LLM applications. We call these sequences “runnables”. All runnables expose the same interface with single-invocation, batch, streaming and async methods. This design is useful because it is not enough to have a single sync interface when building an LLM application. Batch is needed for efficient processing of many inputs. Streaming (and streaming of intermediate steps) is needed to show the user that progress is being made. Async interfaces are nice when moving into production. 
Rather than having to write multiple implementations for all of those, LCEL allows you to write a runnable once and invoke it in many different ways. For more check out the [LCEL docs](https://js.langchain.com/docs/concepts/lcel). ![LangChain Stack](../docs/core_docs/static/svg/langchain_stack_062024.svg) ## 📕 Releases & Versioning `@langchain/core` is currently on version `0.3.x`. As `@langchain/core` contains the base abstractions and runtime for the whole LangChain ecosystem, we will communicate any breaking changes with advance notice and version bumps. The exception for this is anything in `@langchain/core/beta`. The reason for `@langchain/core/beta` is that given the rate of change of the field, being able to move quickly is still a priority, and this module is our attempt to do so. Minor version increases will occur for: - Breaking changes for any public interfaces NOT in `@langchain/core/beta` Patch version increases will occur for: - Bug fixes - New features - Any changes to private interfaces - Any changes to `@langchain/core/beta` ## 📦 Creating your own package Other LangChain packages should add this package as a dependency and extend the classes within. For an example, see the [@langchain/anthropic](https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-anthropic) in this repo. Because all used packages must share the same version of core, packages should never directly depend on `@langchain/core`. Instead they should have core as a peer dependency and a dev dependency. 
We suggest using a tilde dependency to allow for different (backwards-compatible) patch versions: ```json { "name": "@langchain/anthropic", "version": "0.0.3", "description": "Anthropic integrations for LangChain.js", "type": "module", "author": "LangChain", "license": "MIT", "dependencies": { "@anthropic-ai/sdk": "^0.10.0" }, "peerDependencies": { "@langchain/core": "~0.3.0" }, "devDependencies": { "@langchain/core": "~0.3.0" } } ``` This recommendation will change to a caret once a major version (1.x.x) release has occurred. We suggest making all packages cross-compatible with ESM and CJS using a build step like the one in [@langchain/anthropic](https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-anthropic), then running `yarn build` before running `npm publish`. We will be exploring how to make this process easier in the future. ## 💁 Contributing Because `@langchain/core` is a low-level package whose abstractions will change infrequently, most contributions should be made in the higher-level LangChain package. Bugfixes or suggestions should be made using the same guidelines as the main package. See [here](https://github.com/langchain-ai/langchainjs/tree/main/CONTRIBUTING.md) for detailed information. Please report any security issues or concerns following our [security guidelines](https://github.com/langchain-ai/langchainjs/tree/main/SECURITY.md).
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/.release-it.json
{ "github": { "release": true, "autoGenerate": true, "tokenRef": "GITHUB_TOKEN_RELEASE" }, "npm": { "versionArgs": [ "--workspaces-update=false" ] } }
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/.eslintrc.cjs
module.exports = { extends: [ "airbnb-base", "eslint:recommended", "prettier", "plugin:@typescript-eslint/recommended", ], parserOptions: { ecmaVersion: 12, parser: "@typescript-eslint/parser", project: "./tsconfig.json", sourceType: "module", }, plugins: ["@typescript-eslint", "no-instanceof", "eslint-plugin-jest"], ignorePatterns: [ "src/utils/@cfworker", "src/utils/fast-json-patch", "src/utils/js-sha1", "src/utils/sax-js", ".eslintrc.cjs", "scripts", "node_modules", "dist", "dist-cjs", "*.js", "*.cjs", "*.d.ts", ], rules: { "no-process-env": 2, "no-instanceof/no-instanceof": 2, "@typescript-eslint/explicit-module-boundary-types": 0, "@typescript-eslint/no-empty-function": 0, "@typescript-eslint/no-shadow": 0, "@typescript-eslint/no-empty-interface": 0, "@typescript-eslint/no-use-before-define": ["error", "nofunc"], "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", "@typescript-eslint/no-this-alias": 0, camelcase: 0, "class-methods-use-this": 0, "import/extensions": [2, "ignorePackages"], "import/no-extraneous-dependencies": [ "error", { devDependencies: ["**/*.test.ts"] }, ], "import/no-unresolved": 0, "import/prefer-default-export": 0, "keyword-spacing": "error", "max-classes-per-file": 0, "max-len": 0, "no-await-in-loop": 0, "no-bitwise": 0, "no-console": 0, "no-empty-function": 0, "no-restricted-syntax": 0, "no-shadow": 0, "no-continue": 0, "no-void": 0, "no-underscore-dangle": 0, "no-use-before-define": 0, "no-useless-constructor": 0, "no-return-await": 0, "no-plusplus": 0, "consistent-return": 0, "no-else-return": 0, "func-names": 0, "no-lonely-if": 0, "prefer-rest-params": 0, "new-cap": ["error", { properties: false, capIsNew: false }], 'jest/no-focused-tests': 'error', "arrow-body-style": 0, "prefer-destructuring": 0, }, overrides: [ { files: ['**/*.test.ts'], rules: { '@typescript-eslint/no-unused-vars': 'off' } } ] };
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/langchain.config.js
import { resolve, dirname } from "node:path";
import { fileURLToPath } from "node:url";

/**
 * Resolves a path relative to this config file's own directory (rather
 * than the process working directory), so the config works regardless of
 * where the build is invoked from.
 *
 * @param {string} relativePath
 * @returns {string}
 */
function abs(relativePath) {
  return resolve(dirname(fileURLToPath(import.meta.url)), relativePath);
}

/**
 * Build configuration consumed by the LangChain build scripts.
 *
 * - `internals`: regexes for module specifiers treated as internal (left
 *   external / not bundled by the entrypoint build).
 * - `entrypoints`: map of public entrypoint name -> source module path
 *   (relative to `src/`, without extension). Each key becomes a package
 *   export such as `@langchain/core/<key>`.
 */
export const config = {
  internals: [/node\:/, /js-tiktoken/, /langsmith/],
  entrypoints: {
    agents: "agents",
    caches: "caches/base",
    "callbacks/base": "callbacks/base",
    "callbacks/dispatch": "callbacks/dispatch/index",
    "callbacks/dispatch/web": "callbacks/dispatch/web",
    "callbacks/manager": "callbacks/manager",
    "callbacks/promises": "callbacks/promises",
    chat_history: "chat_history",
    context: "context",
    documents: "documents/index",
    "document_loaders/base": "document_loaders/base",
    "document_loaders/langsmith": "document_loaders/langsmith",
    embeddings: "embeddings",
    example_selectors: "example_selectors/index",
    indexing: "indexing/index",
    "language_models/base": "language_models/base",
    "language_models/chat_models": "language_models/chat_models",
    "language_models/llms": "language_models/llms",
    load: "load/index",
    "load/serializable": "load/serializable",
    memory: "memory",
    messages: "messages/index",
    "messages/tool": "messages/tool",
    output_parsers: "output_parsers/index",
    "output_parsers/openai_tools": "output_parsers/openai_tools/index",
    "output_parsers/openai_functions": "output_parsers/openai_functions/index",
    outputs: "outputs",
    prompts: "prompts/index",
    prompt_values: "prompt_values",
    runnables: "runnables/index",
    "runnables/graph": "runnables/graph",
    "runnables/remote": "runnables/remote",
    retrievers: "retrievers/index",
    "retrievers/document_compressors": "retrievers/document_compressors/base",
    singletons: "singletons/index",
    stores: "stores",
    "structured_query": "structured_query/index",
    tools: "tools/index",
    "tracers/base": "tracers/base",
    "tracers/console": "tracers/console",
    "tracers/initialize": "tracers/initialize",
    "tracers/log_stream": "tracers/log_stream",
    "tracers/run_collector": "tracers/run_collector",
    "tracers/tracer_langchain": "tracers/tracer_langchain",
    "tracers/tracer_langchain_v1": "tracers/tracer_langchain_v1",
    "utils/async_caller": "utils/async_caller",
    "utils/chunk_array": "utils/chunk_array",
    "utils/env": "utils/env",
    "utils/event_source_parse": "utils/event_source_parse",
    "utils/function_calling": "utils/function_calling",
    "utils/hash": "utils/hash",
    "utils/json_patch": "utils/json_patch",
    "utils/json_schema": "utils/json_schema",
    "utils/math": "utils/math",
    "utils/stream": "utils/stream",
    "utils/testing": "utils/testing/index",
    "utils/tiktoken": "utils/tiktoken",
    "utils/types": "utils/types/index",
    vectorstores: "vectorstores",
  },
  // NOTE(review): unlike the paths above that go through `abs`, this resolves
  // against the process cwd — presumably the build always runs from the
  // package root. Confirm before changing to `abs("./tsconfig.json")`.
  tsConfigPath: resolve("./tsconfig.json"),
  packageSuffix: "core",
  cjsSource: "./dist-cjs",
  cjsDestination: "./dist",
  abs,
}
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/.prettierignore
src/load/import_type.ts
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/package.json
{ "name": "@langchain/core", "version": "0.3.22", "description": "Core LangChain.js abstractions and schemas", "type": "module", "engines": { "node": ">=18" }, "main": "./index.js", "types": "./index.d.ts", "repository": { "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/langchain-core/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/core", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", "clean": "rm -rf .turbo dist/", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", "lint": "yarn lint:eslint && yarn lint:dpdm", "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", "prepack": "yarn build", "release": "release-it --only-version --config .release-it.json", "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:integration": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "format": "prettier --config .prettierrc --write \"src\"", "format:check": "prettier --config .prettierrc --check \"src\"" }, "author": "LangChain", "license": "MIT", "dependencies": { "ansi-styles": "^5.0.0", "camelcase": "6", "decamelize": "1.2.0", "js-tiktoken": "^1.0.12", "langsmith": "^0.2.8", "mustache": "^4.2.0", "p-queue": "^6.6.2", "p-retry": "4", "uuid": "^10.0.0", "zod": "^3.22.4", "zod-to-json-schema": "^3.22.3" }, "devDependencies": { "@jest/globals": "^29.5.0", "@langchain/scripts": ">=0.1.0 <0.2.0", "@swc/core": "^1.3.90", 
"@swc/jest": "^0.2.29", "@types/decamelize": "^1.2.0", "@types/mustache": "^4", "dpdm": "^3.12.0", "eslint": "^8.33.0", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-jest": "^27.6.0", "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", "jest-environment-node": "^29.6.4", "ml-matrix": "^6.10.4", "prettier": "^2.8.3", "release-it": "^17.6.0", "rimraf": "^5.0.1", "ts-jest": "^29.1.0", "typescript": "~5.1.6", "web-streams-polyfill": "^4.0.0" }, "publishConfig": { "access": "public" }, "keywords": [ "llm", "ai", "gpt3", "chain", "prompt", "prompt engineering", "chatgpt", "machine learning", "ml", "openai", "embeddings", "vectorstores" ], "exports": { "./agents": { "types": { "import": "./agents.d.ts", "require": "./agents.d.cts", "default": "./agents.d.ts" }, "import": "./agents.js", "require": "./agents.cjs" }, "./caches": { "types": { "import": "./caches.d.ts", "require": "./caches.d.cts", "default": "./caches.d.ts" }, "import": "./caches.js", "require": "./caches.cjs" }, "./callbacks/base": { "types": { "import": "./callbacks/base.d.ts", "require": "./callbacks/base.d.cts", "default": "./callbacks/base.d.ts" }, "import": "./callbacks/base.js", "require": "./callbacks/base.cjs" }, "./callbacks/dispatch": { "types": { "import": "./callbacks/dispatch.d.ts", "require": "./callbacks/dispatch.d.cts", "default": "./callbacks/dispatch.d.ts" }, "import": "./callbacks/dispatch.js", "require": "./callbacks/dispatch.cjs" }, "./callbacks/dispatch/web": { "types": { "import": "./callbacks/dispatch/web.d.ts", "require": "./callbacks/dispatch/web.d.cts", "default": "./callbacks/dispatch/web.d.ts" }, "import": "./callbacks/dispatch/web.js", "require": "./callbacks/dispatch/web.cjs" }, "./callbacks/manager": { "types": { "import": "./callbacks/manager.d.ts", "require": "./callbacks/manager.d.cts", "default": "./callbacks/manager.d.ts" }, "import": 
"./callbacks/manager.js", "require": "./callbacks/manager.cjs" }, "./callbacks/promises": { "types": { "import": "./callbacks/promises.d.ts", "require": "./callbacks/promises.d.cts", "default": "./callbacks/promises.d.ts" }, "import": "./callbacks/promises.js", "require": "./callbacks/promises.cjs" }, "./chat_history": { "types": { "import": "./chat_history.d.ts", "require": "./chat_history.d.cts", "default": "./chat_history.d.ts" }, "import": "./chat_history.js", "require": "./chat_history.cjs" }, "./context": { "types": { "import": "./context.d.ts", "require": "./context.d.cts", "default": "./context.d.ts" }, "import": "./context.js", "require": "./context.cjs" }, "./documents": { "types": { "import": "./documents.d.ts", "require": "./documents.d.cts", "default": "./documents.d.ts" }, "import": "./documents.js", "require": "./documents.cjs" }, "./document_loaders/base": { "types": { "import": "./document_loaders/base.d.ts", "require": "./document_loaders/base.d.cts", "default": "./document_loaders/base.d.ts" }, "import": "./document_loaders/base.js", "require": "./document_loaders/base.cjs" }, "./document_loaders/langsmith": { "types": { "import": "./document_loaders/langsmith.d.ts", "require": "./document_loaders/langsmith.d.cts", "default": "./document_loaders/langsmith.d.ts" }, "import": "./document_loaders/langsmith.js", "require": "./document_loaders/langsmith.cjs" }, "./embeddings": { "types": { "import": "./embeddings.d.ts", "require": "./embeddings.d.cts", "default": "./embeddings.d.ts" }, "import": "./embeddings.js", "require": "./embeddings.cjs" }, "./example_selectors": { "types": { "import": "./example_selectors.d.ts", "require": "./example_selectors.d.cts", "default": "./example_selectors.d.ts" }, "import": "./example_selectors.js", "require": "./example_selectors.cjs" }, "./indexing": { "types": { "import": "./indexing.d.ts", "require": "./indexing.d.cts", "default": "./indexing.d.ts" }, "import": "./indexing.js", "require": "./indexing.cjs" }, 
"./language_models/base": { "types": { "import": "./language_models/base.d.ts", "require": "./language_models/base.d.cts", "default": "./language_models/base.d.ts" }, "import": "./language_models/base.js", "require": "./language_models/base.cjs" }, "./language_models/chat_models": { "types": { "import": "./language_models/chat_models.d.ts", "require": "./language_models/chat_models.d.cts", "default": "./language_models/chat_models.d.ts" }, "import": "./language_models/chat_models.js", "require": "./language_models/chat_models.cjs" }, "./language_models/llms": { "types": { "import": "./language_models/llms.d.ts", "require": "./language_models/llms.d.cts", "default": "./language_models/llms.d.ts" }, "import": "./language_models/llms.js", "require": "./language_models/llms.cjs" }, "./load": { "types": { "import": "./load.d.ts", "require": "./load.d.cts", "default": "./load.d.ts" }, "import": "./load.js", "require": "./load.cjs" }, "./load/serializable": { "types": { "import": "./load/serializable.d.ts", "require": "./load/serializable.d.cts", "default": "./load/serializable.d.ts" }, "import": "./load/serializable.js", "require": "./load/serializable.cjs" }, "./memory": { "types": { "import": "./memory.d.ts", "require": "./memory.d.cts", "default": "./memory.d.ts" }, "import": "./memory.js", "require": "./memory.cjs" }, "./messages": { "types": { "import": "./messages.d.ts", "require": "./messages.d.cts", "default": "./messages.d.ts" }, "import": "./messages.js", "require": "./messages.cjs" }, "./messages/tool": { "types": { "import": "./messages/tool.d.ts", "require": "./messages/tool.d.cts", "default": "./messages/tool.d.ts" }, "import": "./messages/tool.js", "require": "./messages/tool.cjs" }, "./output_parsers": { "types": { "import": "./output_parsers.d.ts", "require": "./output_parsers.d.cts", "default": "./output_parsers.d.ts" }, "import": "./output_parsers.js", "require": "./output_parsers.cjs" }, "./output_parsers/openai_tools": { "types": { "import": 
"./output_parsers/openai_tools.d.ts", "require": "./output_parsers/openai_tools.d.cts", "default": "./output_parsers/openai_tools.d.ts" }, "import": "./output_parsers/openai_tools.js", "require": "./output_parsers/openai_tools.cjs" }, "./output_parsers/openai_functions": { "types": { "import": "./output_parsers/openai_functions.d.ts", "require": "./output_parsers/openai_functions.d.cts", "default": "./output_parsers/openai_functions.d.ts" }, "import": "./output_parsers/openai_functions.js", "require": "./output_parsers/openai_functions.cjs" }, "./outputs": { "types": { "import": "./outputs.d.ts", "require": "./outputs.d.cts", "default": "./outputs.d.ts" }, "import": "./outputs.js", "require": "./outputs.cjs" }, "./prompts": { "types": { "import": "./prompts.d.ts", "require": "./prompts.d.cts", "default": "./prompts.d.ts" }, "import": "./prompts.js", "require": "./prompts.cjs" }, "./prompt_values": { "types": { "import": "./prompt_values.d.ts", "require": "./prompt_values.d.cts", "default": "./prompt_values.d.ts" }, "import": "./prompt_values.js", "require": "./prompt_values.cjs" }, "./runnables": { "types": { "import": "./runnables.d.ts", "require": "./runnables.d.cts", "default": "./runnables.d.ts" }, "import": "./runnables.js", "require": "./runnables.cjs" }, "./runnables/graph": { "types": { "import": "./runnables/graph.d.ts", "require": "./runnables/graph.d.cts", "default": "./runnables/graph.d.ts" }, "import": "./runnables/graph.js", "require": "./runnables/graph.cjs" }, "./runnables/remote": { "types": { "import": "./runnables/remote.d.ts", "require": "./runnables/remote.d.cts", "default": "./runnables/remote.d.ts" }, "import": "./runnables/remote.js", "require": "./runnables/remote.cjs" }, "./retrievers": { "types": { "import": "./retrievers.d.ts", "require": "./retrievers.d.cts", "default": "./retrievers.d.ts" }, "import": "./retrievers.js", "require": "./retrievers.cjs" }, "./retrievers/document_compressors": { "types": { "import": 
"./retrievers/document_compressors.d.ts", "require": "./retrievers/document_compressors.d.cts", "default": "./retrievers/document_compressors.d.ts" }, "import": "./retrievers/document_compressors.js", "require": "./retrievers/document_compressors.cjs" }, "./singletons": { "types": { "import": "./singletons.d.ts", "require": "./singletons.d.cts", "default": "./singletons.d.ts" }, "import": "./singletons.js", "require": "./singletons.cjs" }, "./stores": { "types": { "import": "./stores.d.ts", "require": "./stores.d.cts", "default": "./stores.d.ts" }, "import": "./stores.js", "require": "./stores.cjs" }, "./structured_query": { "types": { "import": "./structured_query.d.ts", "require": "./structured_query.d.cts", "default": "./structured_query.d.ts" }, "import": "./structured_query.js", "require": "./structured_query.cjs" }, "./tools": { "types": { "import": "./tools.d.ts", "require": "./tools.d.cts", "default": "./tools.d.ts" }, "import": "./tools.js", "require": "./tools.cjs" }, "./tracers/base": { "types": { "import": "./tracers/base.d.ts", "require": "./tracers/base.d.cts", "default": "./tracers/base.d.ts" }, "import": "./tracers/base.js", "require": "./tracers/base.cjs" }, "./tracers/console": { "types": { "import": "./tracers/console.d.ts", "require": "./tracers/console.d.cts", "default": "./tracers/console.d.ts" }, "import": "./tracers/console.js", "require": "./tracers/console.cjs" }, "./tracers/initialize": { "types": { "import": "./tracers/initialize.d.ts", "require": "./tracers/initialize.d.cts", "default": "./tracers/initialize.d.ts" }, "import": "./tracers/initialize.js", "require": "./tracers/initialize.cjs" }, "./tracers/log_stream": { "types": { "import": "./tracers/log_stream.d.ts", "require": "./tracers/log_stream.d.cts", "default": "./tracers/log_stream.d.ts" }, "import": "./tracers/log_stream.js", "require": "./tracers/log_stream.cjs" }, "./tracers/run_collector": { "types": { "import": "./tracers/run_collector.d.ts", "require": 
"./tracers/run_collector.d.cts", "default": "./tracers/run_collector.d.ts" }, "import": "./tracers/run_collector.js", "require": "./tracers/run_collector.cjs" }, "./tracers/tracer_langchain": { "types": { "import": "./tracers/tracer_langchain.d.ts", "require": "./tracers/tracer_langchain.d.cts", "default": "./tracers/tracer_langchain.d.ts" }, "import": "./tracers/tracer_langchain.js", "require": "./tracers/tracer_langchain.cjs" }, "./tracers/tracer_langchain_v1": { "types": { "import": "./tracers/tracer_langchain_v1.d.ts", "require": "./tracers/tracer_langchain_v1.d.cts", "default": "./tracers/tracer_langchain_v1.d.ts" }, "import": "./tracers/tracer_langchain_v1.js", "require": "./tracers/tracer_langchain_v1.cjs" }, "./utils/async_caller": { "types": { "import": "./utils/async_caller.d.ts", "require": "./utils/async_caller.d.cts", "default": "./utils/async_caller.d.ts" }, "import": "./utils/async_caller.js", "require": "./utils/async_caller.cjs" }, "./utils/chunk_array": { "types": { "import": "./utils/chunk_array.d.ts", "require": "./utils/chunk_array.d.cts", "default": "./utils/chunk_array.d.ts" }, "import": "./utils/chunk_array.js", "require": "./utils/chunk_array.cjs" }, "./utils/env": { "types": { "import": "./utils/env.d.ts", "require": "./utils/env.d.cts", "default": "./utils/env.d.ts" }, "import": "./utils/env.js", "require": "./utils/env.cjs" }, "./utils/event_source_parse": { "types": { "import": "./utils/event_source_parse.d.ts", "require": "./utils/event_source_parse.d.cts", "default": "./utils/event_source_parse.d.ts" }, "import": "./utils/event_source_parse.js", "require": "./utils/event_source_parse.cjs" }, "./utils/function_calling": { "types": { "import": "./utils/function_calling.d.ts", "require": "./utils/function_calling.d.cts", "default": "./utils/function_calling.d.ts" }, "import": "./utils/function_calling.js", "require": "./utils/function_calling.cjs" }, "./utils/hash": { "types": { "import": "./utils/hash.d.ts", "require": 
"./utils/hash.d.cts", "default": "./utils/hash.d.ts" }, "import": "./utils/hash.js", "require": "./utils/hash.cjs" }, "./utils/json_patch": { "types": { "import": "./utils/json_patch.d.ts", "require": "./utils/json_patch.d.cts", "default": "./utils/json_patch.d.ts" }, "import": "./utils/json_patch.js", "require": "./utils/json_patch.cjs" }, "./utils/json_schema": { "types": { "import": "./utils/json_schema.d.ts", "require": "./utils/json_schema.d.cts", "default": "./utils/json_schema.d.ts" }, "import": "./utils/json_schema.js", "require": "./utils/json_schema.cjs" }, "./utils/math": { "types": { "import": "./utils/math.d.ts", "require": "./utils/math.d.cts", "default": "./utils/math.d.ts" }, "import": "./utils/math.js", "require": "./utils/math.cjs" }, "./utils/stream": { "types": { "import": "./utils/stream.d.ts", "require": "./utils/stream.d.cts", "default": "./utils/stream.d.ts" }, "import": "./utils/stream.js", "require": "./utils/stream.cjs" }, "./utils/testing": { "types": { "import": "./utils/testing.d.ts", "require": "./utils/testing.d.cts", "default": "./utils/testing.d.ts" }, "import": "./utils/testing.js", "require": "./utils/testing.cjs" }, "./utils/tiktoken": { "types": { "import": "./utils/tiktoken.d.ts", "require": "./utils/tiktoken.d.cts", "default": "./utils/tiktoken.d.ts" }, "import": "./utils/tiktoken.js", "require": "./utils/tiktoken.cjs" }, "./utils/types": { "types": { "import": "./utils/types.d.ts", "require": "./utils/types.d.cts", "default": "./utils/types.d.ts" }, "import": "./utils/types.js", "require": "./utils/types.cjs" }, "./vectorstores": { "types": { "import": "./vectorstores.d.ts", "require": "./vectorstores.d.cts", "default": "./vectorstores.d.ts" }, "import": "./vectorstores.js", "require": "./vectorstores.cjs" }, "./package.json": "./package.json" }, "files": [ "dist/", "agents.cjs", "agents.js", "agents.d.ts", "agents.d.cts", "caches.cjs", "caches.js", "caches.d.ts", "caches.d.cts", "callbacks/base.cjs", "callbacks/base.js", 
"callbacks/base.d.ts", "callbacks/base.d.cts", "callbacks/dispatch.cjs", "callbacks/dispatch.js", "callbacks/dispatch.d.ts", "callbacks/dispatch.d.cts", "callbacks/dispatch/web.cjs", "callbacks/dispatch/web.js", "callbacks/dispatch/web.d.ts", "callbacks/dispatch/web.d.cts", "callbacks/manager.cjs", "callbacks/manager.js", "callbacks/manager.d.ts", "callbacks/manager.d.cts", "callbacks/promises.cjs", "callbacks/promises.js", "callbacks/promises.d.ts", "callbacks/promises.d.cts", "chat_history.cjs", "chat_history.js", "chat_history.d.ts", "chat_history.d.cts", "context.cjs", "context.js", "context.d.ts", "context.d.cts", "documents.cjs", "documents.js", "documents.d.ts", "documents.d.cts", "document_loaders/base.cjs", "document_loaders/base.js", "document_loaders/base.d.ts", "document_loaders/base.d.cts", "document_loaders/langsmith.cjs", "document_loaders/langsmith.js", "document_loaders/langsmith.d.ts", "document_loaders/langsmith.d.cts", "embeddings.cjs", "embeddings.js", "embeddings.d.ts", "embeddings.d.cts", "example_selectors.cjs", "example_selectors.js", "example_selectors.d.ts", "example_selectors.d.cts", "indexing.cjs", "indexing.js", "indexing.d.ts", "indexing.d.cts", "language_models/base.cjs", "language_models/base.js", "language_models/base.d.ts", "language_models/base.d.cts", "language_models/chat_models.cjs", "language_models/chat_models.js", "language_models/chat_models.d.ts", "language_models/chat_models.d.cts", "language_models/llms.cjs", "language_models/llms.js", "language_models/llms.d.ts", "language_models/llms.d.cts", "load.cjs", "load.js", "load.d.ts", "load.d.cts", "load/serializable.cjs", "load/serializable.js", "load/serializable.d.ts", "load/serializable.d.cts", "memory.cjs", "memory.js", "memory.d.ts", "memory.d.cts", "messages.cjs", "messages.js", "messages.d.ts", "messages.d.cts", "messages/tool.cjs", "messages/tool.js", "messages/tool.d.ts", "messages/tool.d.cts", "output_parsers.cjs", "output_parsers.js", "output_parsers.d.ts", 
"output_parsers.d.cts", "output_parsers/openai_tools.cjs", "output_parsers/openai_tools.js", "output_parsers/openai_tools.d.ts", "output_parsers/openai_tools.d.cts", "output_parsers/openai_functions.cjs", "output_parsers/openai_functions.js", "output_parsers/openai_functions.d.ts", "output_parsers/openai_functions.d.cts", "outputs.cjs", "outputs.js", "outputs.d.ts", "outputs.d.cts", "prompts.cjs", "prompts.js", "prompts.d.ts", "prompts.d.cts", "prompt_values.cjs", "prompt_values.js", "prompt_values.d.ts", "prompt_values.d.cts", "runnables.cjs", "runnables.js", "runnables.d.ts", "runnables.d.cts", "runnables/graph.cjs", "runnables/graph.js", "runnables/graph.d.ts", "runnables/graph.d.cts", "runnables/remote.cjs", "runnables/remote.js", "runnables/remote.d.ts", "runnables/remote.d.cts", "retrievers.cjs", "retrievers.js", "retrievers.d.ts", "retrievers.d.cts", "retrievers/document_compressors.cjs", "retrievers/document_compressors.js", "retrievers/document_compressors.d.ts", "retrievers/document_compressors.d.cts", "singletons.cjs", "singletons.js", "singletons.d.ts", "singletons.d.cts", "stores.cjs", "stores.js", "stores.d.ts", "stores.d.cts", "structured_query.cjs", "structured_query.js", "structured_query.d.ts", "structured_query.d.cts", "tools.cjs", "tools.js", "tools.d.ts", "tools.d.cts", "tracers/base.cjs", "tracers/base.js", "tracers/base.d.ts", "tracers/base.d.cts", "tracers/console.cjs", "tracers/console.js", "tracers/console.d.ts", "tracers/console.d.cts", "tracers/initialize.cjs", "tracers/initialize.js", "tracers/initialize.d.ts", "tracers/initialize.d.cts", "tracers/log_stream.cjs", "tracers/log_stream.js", "tracers/log_stream.d.ts", "tracers/log_stream.d.cts", "tracers/run_collector.cjs", "tracers/run_collector.js", "tracers/run_collector.d.ts", "tracers/run_collector.d.cts", "tracers/tracer_langchain.cjs", "tracers/tracer_langchain.js", "tracers/tracer_langchain.d.ts", "tracers/tracer_langchain.d.cts", "tracers/tracer_langchain_v1.cjs", 
"tracers/tracer_langchain_v1.js", "tracers/tracer_langchain_v1.d.ts", "tracers/tracer_langchain_v1.d.cts", "utils/async_caller.cjs", "utils/async_caller.js", "utils/async_caller.d.ts", "utils/async_caller.d.cts", "utils/chunk_array.cjs", "utils/chunk_array.js", "utils/chunk_array.d.ts", "utils/chunk_array.d.cts", "utils/env.cjs", "utils/env.js", "utils/env.d.ts", "utils/env.d.cts", "utils/event_source_parse.cjs", "utils/event_source_parse.js", "utils/event_source_parse.d.ts", "utils/event_source_parse.d.cts", "utils/function_calling.cjs", "utils/function_calling.js", "utils/function_calling.d.ts", "utils/function_calling.d.cts", "utils/hash.cjs", "utils/hash.js", "utils/hash.d.ts", "utils/hash.d.cts", "utils/json_patch.cjs", "utils/json_patch.js", "utils/json_patch.d.ts", "utils/json_patch.d.cts", "utils/json_schema.cjs", "utils/json_schema.js", "utils/json_schema.d.ts", "utils/json_schema.d.cts", "utils/math.cjs", "utils/math.js", "utils/math.d.ts", "utils/math.d.cts", "utils/stream.cjs", "utils/stream.js", "utils/stream.d.ts", "utils/stream.d.cts", "utils/testing.cjs", "utils/testing.js", "utils/testing.d.ts", "utils/testing.d.cts", "utils/tiktoken.cjs", "utils/tiktoken.js", "utils/tiktoken.d.ts", "utils/tiktoken.d.cts", "utils/types.cjs", "utils/types.js", "utils/types.d.ts", "utils/types.d.cts", "vectorstores.cjs", "vectorstores.js", "vectorstores.d.ts", "vectorstores.d.cts" ] }
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/tsconfig.cjs.json
{ "extends": "./tsconfig.json", "compilerOptions": { "module": "commonjs", "declaration": false }, "exclude": [ "node_modules", "dist", "docs", "**/tests" ] }
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/turbo.json
{ "extends": ["//"], "pipeline": { "build": { "outputs": ["**/dist/**"] }, "build:internal": { "dependsOn": ["^build:internal"] } } }
0
lc_public_repos/langchainjs
lc_public_repos/langchainjs/langchain-core/.prettierrc
{ "$schema": "https://json.schemastore.org/prettierrc", "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": false, "quoteProps": "as-needed", "jsxSingleQuote": false, "trailingComma": "es5", "bracketSpacing": true, "arrowParens": "always", "requirePragma": false, "insertPragma": false, "proseWrap": "preserve", "htmlWhitespaceSensitivity": "css", "vueIndentScriptAndStyle": false, "endOfLine": "lf" }
0
lc_public_repos/langchainjs/langchain-core
lc_public_repos/langchainjs/langchain-core/src/agents.ts
/**
 * An action selected by an agent: which tool to call, with what input,
 * plus the raw model output (`log`) that produced the decision.
 */
export type AgentAction = {
  /** Name of the tool to invoke. */
  tool: string;
  /** Input to pass to the tool — a raw string or structured arguments. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  toolInput: string | Record<string, any>;
  /** Raw text from the model from which this action was parsed. */
  log: string;
};

/**
 * The terminal output of an agent run: the values to return to the caller
 * and the raw model output that signalled completion.
 */
export type AgentFinish = {
  /** Final key/value outputs of the agent. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  returnValues: Record<string, any>;
  /** Raw text from the model from which the finish was parsed. */
  log: string;
};

/**
 * One completed step of an agent loop: the action that was taken paired
 * with the observation (tool output) it produced.
 */
export type AgentStep = {
  action: AgentAction;
  observation: string;
};
0
lc_public_repos/langchainjs/langchain-core
lc_public_repos/langchainjs/langchain-core/src/vectorstores.ts
import type { EmbeddingsInterface } from "./embeddings.js"; import type { DocumentInterface } from "./documents/document.js"; import { BaseRetriever, BaseRetrieverInterface, type BaseRetrieverInput, } from "./retrievers/index.js"; import { Serializable } from "./load/serializable.js"; import { CallbackManagerForRetrieverRun, Callbacks, } from "./callbacks/manager.js"; /** * Type for options when adding a document to the VectorStore. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any type AddDocumentOptions = Record<string, any>; /** * Options for configuring a maximal marginal relevance (MMR) search. * * MMR search optimizes for both similarity to the query and diversity * among the results, balancing the retrieval of relevant documents * with variation in the content returned. * * Fields: * * - `fetchK` (optional): The initial number of documents to retrieve from the * vector store before applying the MMR algorithm. This larger set provides a * pool of documents from which the algorithm can select the most diverse * results based on relevance to the query. * * - `filter` (optional): A filter of type `FilterType` to refine the search * results, allowing additional conditions to target specific subsets * of documents. * * - `k`: The number of documents to return in the final results. This is the * primary count of documents that are most relevant to the query. * * - `lambda` (optional): A value between 0 and 1 that determines the balance * between relevance and diversity: * - A `lambda` of 0 emphasizes diversity, maximizing content variation. * - A `lambda` of 1 emphasizes similarity to the query, focusing on relevance. * Values between 0 and 1 provide a mix of relevance and diversity. * * @template FilterType - The type used for filtering results, as defined * by the vector store. 
*/ export type MaxMarginalRelevanceSearchOptions<FilterType> = { k: number; fetchK?: number; lambda?: number; filter?: FilterType; }; /** * Options for configuring a maximal marginal relevance (MMR) search * when using the `VectorStoreRetriever`. * * These parameters control how the MMR algorithm balances relevance to the * query and diversity among the retrieved documents. * * Fields: * - `fetchK` (optional): Specifies the initial number of documents to fetch * before applying the MMR algorithm. This larger set provides a pool of * documents from which the algorithm can select the most diverse results * based on relevance to the query. * * - `lambda` (optional): A value between 0 and 1 that determines the balance * between relevance and diversity: * - A `lambda` of 0 maximizes diversity among the results, prioritizing varied content. * - A `lambda` of 1 maximizes similarity to the query, prioritizing relevance. * Values between 0 and 1 provide a mix of relevance and diversity. */ export type VectorStoreRetrieverMMRSearchKwargs = { fetchK?: number; lambda?: number; }; /** * Input configuration options for creating a `VectorStoreRetriever` instance. * * This type combines properties from `BaseRetrieverInput` with specific settings * for the `VectorStoreRetriever`, including options for similarity or maximal * marginal relevance (MMR) search types. * * Fields: * * - `callbacks` (optional): An array of callback functions that handle various * events during retrieval, such as logging, error handling, or progress updates. * * - `tags` (optional): An array of strings used to add contextual tags to * retrieval operations, allowing for easier categorization and tracking. * * - `metadata` (optional): A record of key-value pairs to store additional * contextual information for retrieval operations, which can be useful * for logging or auditing purposes. 
* * - `verbose` (optional): A boolean flag that, if set to `true`, enables * detailed logging and output during the retrieval process. Defaults to `false`. * * - `vectorStore`: The `VectorStore` instance implementing `VectorStoreInterface` * that will be used for document storage and retrieval. * * - `k` (optional): Specifies the number of documents to retrieve per search * query. Defaults to 4 if not specified. * * - `filter` (optional): A filter of type `FilterType` (defined by the vector store) * to refine the set of documents returned, allowing for targeted search results. * * - `searchType`: Determines the type of search to perform: * - `"similarity"`: Executes a similarity search, retrieving documents based purely * on vector similarity to the query. * - `"mmr"`: Executes a maximal marginal relevance (MMR) search, balancing similarity * and diversity in the search results. * * - `searchKwargs` (optional): Used only if `searchType` is `"mmr"`, this object * provides additional options for MMR search, including: * - `fetchK`: Specifies the number of documents to initially fetch before applying * the MMR algorithm, providing a pool from which the most diverse results are selected. * - `lambda`: A diversity parameter, where 0 emphasizes diversity and 1 emphasizes * relevance to the query. Values between 0 and 1 provide a balance of relevance and diversity. * * @template V - The type of vector store implementing `VectorStoreInterface`. */ export type VectorStoreRetrieverInput<V extends VectorStoreInterface> = BaseRetrieverInput & ( | { vectorStore: V; k?: number; filter?: V["FilterType"]; searchType?: "similarity"; } | { vectorStore: V; k?: number; filter?: V["FilterType"]; searchType: "mmr"; searchKwargs?: VectorStoreRetrieverMMRSearchKwargs; } ); /** * Interface for a retriever that uses a vector store to store and retrieve * document embeddings. 
This retriever interface allows for adding documents * to the underlying vector store and conducting retrieval operations. * * `VectorStoreRetrieverInterface` extends `BaseRetrieverInterface` to provide * document retrieval capabilities based on vector similarity. * * @interface VectorStoreRetrieverInterface * @extends BaseRetrieverInterface */ export interface VectorStoreRetrieverInterface< V extends VectorStoreInterface = VectorStoreInterface > extends BaseRetrieverInterface { vectorStore: V; /** * Adds an array of documents to the vector store. * * This method embeds the provided documents and stores them within the * vector store. Additional options can be specified for custom behavior * during the addition process. * * @param documents - An array of documents to embed and add to the vector store. * @param options - Optional settings to customize document addition. * @returns A promise that resolves to an array of document IDs or `void`, * depending on the implementation. */ addDocuments( documents: DocumentInterface[], options?: AddDocumentOptions ): Promise<string[] | void>; } /** * Class for retrieving documents from a `VectorStore` based on vector similarity * or maximal marginal relevance (MMR). * * `VectorStoreRetriever` extends `BaseRetriever`, implementing methods for * adding documents to the underlying vector store and performing document * retrieval with optional configurations. * * @class VectorStoreRetriever * @extends BaseRetriever * @implements VectorStoreRetrieverInterface * @template V - Type of vector store implementing `VectorStoreInterface`. */ export class VectorStoreRetriever< V extends VectorStoreInterface = VectorStoreInterface > extends BaseRetriever implements VectorStoreRetrieverInterface { static lc_name() { return "VectorStoreRetriever"; } get lc_namespace() { return ["langchain_core", "vectorstores"]; } /** * The instance of `VectorStore` used for storing and retrieving document embeddings. 
* This vector store must implement the `VectorStoreInterface` to be compatible * with the retriever’s operations. */ vectorStore: V; /** * Specifies the number of documents to retrieve for each search query. * Defaults to 4 if not specified, providing a basic result count for similarity or MMR searches. */ k = 4; /** * Determines the type of search operation to perform on the vector store. * * - `"similarity"` (default): Conducts a similarity search based purely on vector similarity * to the query. * - `"mmr"`: Executes a maximal marginal relevance (MMR) search, balancing relevance and * diversity in the retrieved results. */ searchType = "similarity"; /** * Additional options specific to maximal marginal relevance (MMR) search, applicable * only if `searchType` is set to `"mmr"`. * * Includes: * - `fetchK`: The initial number of documents fetched before applying the MMR algorithm, * allowing for a larger selection from which to choose the most diverse results. * - `lambda`: A parameter between 0 and 1 to adjust the relevance-diversity balance, * where 0 prioritizes diversity and 1 prioritizes relevance. */ searchKwargs?: VectorStoreRetrieverMMRSearchKwargs; /** * Optional filter applied to search results, defined by the `FilterType` of the vector store. * Allows for refined, targeted results by restricting the returned documents based * on specified filter criteria. */ filter?: V["FilterType"]; /** * Returns the type of vector store, as defined by the `vectorStore` instance. * * @returns {string} The vector store type. */ _vectorstoreType(): string { return this.vectorStore._vectorstoreType(); } /** * Initializes a new instance of `VectorStoreRetriever` with the specified configuration. * * This constructor configures the retriever to interact with a given `VectorStore` * and supports different retrieval strategies, including similarity search and maximal * marginal relevance (MMR) search. 
Various options allow customization of the number * of documents retrieved per query, filtering based on conditions, and fine-tuning * MMR-specific parameters. * * @param fields - Configuration options for setting up the retriever: * * - `vectorStore` (required): The `VectorStore` instance implementing `VectorStoreInterface` * that will be used to store and retrieve document embeddings. This is the core component * of the retriever, enabling vector-based similarity and MMR searches. * * - `k` (optional): Specifies the number of documents to retrieve per search query. If not * provided, defaults to 4. This count determines the number of most relevant documents returned * for each search operation, balancing performance with comprehensiveness. * * - `searchType` (optional): Defines the search approach used by the retriever, allowing for * flexibility between two methods: * - `"similarity"` (default): A similarity-based search, retrieving documents with high vector * similarity to the query. This type prioritizes relevance and is often used when diversity * among results is less critical. * - `"mmr"`: Maximal Marginal Relevance search, which combines relevance with diversity. MMR * is useful for scenarios where varied content is essential, as it selects results that * both match the query and introduce content diversity. * * - `filter` (optional): A filter of type `FilterType`, defined by the vector store, that allows * for refined and targeted search results. This filter applies specified conditions to limit * which documents are eligible for retrieval, offering control over the scope of results. * * - `searchKwargs` (optional, applicable only if `searchType` is `"mmr"`): Additional settings * for configuring MMR-specific behavior. These parameters allow further tuning of the MMR * search process: * - `fetchK`: The initial number of documents fetched from the vector store before the MMR * algorithm is applied. 
Fetching a larger set enables the algorithm to select a more * diverse subset of documents. * - `lambda`: A parameter controlling the relevance-diversity balance, where 0 emphasizes * diversity and 1 prioritizes relevance. Intermediate values provide a blend of the two, * allowing customization based on the importance of content variety relative to query relevance. */ constructor(fields: VectorStoreRetrieverInput<V>) { super(fields); this.vectorStore = fields.vectorStore; this.k = fields.k ?? this.k; this.searchType = fields.searchType ?? this.searchType; this.filter = fields.filter; if (fields.searchType === "mmr") { this.searchKwargs = fields.searchKwargs; } } /** * Retrieves relevant documents based on the specified query, using either * similarity or maximal marginal relevance (MMR) search. * * If `searchType` is set to `"mmr"`, performs an MMR search to balance * similarity and diversity among results. If `searchType` is `"similarity"`, * retrieves results purely based on similarity to the query. * * @param query - The query string used to find relevant documents. * @param runManager - Optional callback manager for tracking retrieval progress. * @returns A promise that resolves to an array of `DocumentInterface` instances * representing the most relevant documents to the query. * @throws {Error} Throws an error if MMR search is requested but not supported * by the vector store. 
* @protected */ async _getRelevantDocuments( query: string, runManager?: CallbackManagerForRetrieverRun ): Promise<DocumentInterface[]> { if (this.searchType === "mmr") { if (typeof this.vectorStore.maxMarginalRelevanceSearch !== "function") { throw new Error( `The vector store backing this retriever, ${this._vectorstoreType()} does not support max marginal relevance search.` ); } return this.vectorStore.maxMarginalRelevanceSearch( query, { k: this.k, filter: this.filter, ...this.searchKwargs, }, runManager?.getChild("vectorstore") ); } return this.vectorStore.similaritySearch( query, this.k, this.filter, runManager?.getChild("vectorstore") ); } /** * Adds an array of documents to the vector store, embedding them as part of * the storage process. * * This method delegates document embedding and storage to the `addDocuments` * method of the underlying vector store. * * @param documents - An array of documents to embed and add to the vector store. * @param options - Optional settings to customize document addition. * @returns A promise that resolves to an array of document IDs or `void`, * depending on the vector store's implementation. */ async addDocuments( documents: DocumentInterface[], options?: AddDocumentOptions ): Promise<string[] | void> { return this.vectorStore.addDocuments(documents, options); } } /** * Interface defining the structure and operations of a vector store, which * facilitates the storage, retrieval, and similarity search of document vectors. * * `VectorStoreInterface` provides methods for adding, deleting, and searching * documents based on vector embeddings, including support for similarity * search with optional filtering and relevance-based retrieval. * * @extends Serializable */ export interface VectorStoreInterface extends Serializable { /** * Defines the filter type used in search and delete operations. Can be an * object for structured conditions or a string for simpler filtering. 
*/ FilterType: object | string; /** * Instance of `EmbeddingsInterface` used to generate vector embeddings for * documents, enabling vector-based search operations. */ embeddings: EmbeddingsInterface; /** * Returns a string identifying the type of vector store implementation, * useful for distinguishing between different vector storage backends. * * @returns {string} A string indicating the vector store type. */ _vectorstoreType(): string; /** * Adds precomputed vectors and their corresponding documents to the vector store. * * @param vectors - An array of vectors, with each vector representing a document. * @param documents - An array of `DocumentInterface` instances corresponding to each vector. * @param options - Optional configurations for adding documents, potentially covering indexing or metadata handling. * @returns A promise that resolves to an array of document IDs or void, depending on implementation. */ addVectors( vectors: number[][], documents: DocumentInterface[], options?: AddDocumentOptions ): Promise<string[] | void>; /** * Adds an array of documents to the vector store. * * @param documents - An array of documents to be embedded and stored in the vector store. * @param options - Optional configurations for embedding and storage operations. * @returns A promise that resolves to an array of document IDs or void, depending on implementation. */ addDocuments( documents: DocumentInterface[], options?: AddDocumentOptions ): Promise<string[] | void>; /** * Deletes documents from the vector store based on the specified parameters. * * @param _params - A flexible object containing key-value pairs that define * the conditions for selecting documents to delete. * @returns A promise that resolves once the deletion operation is complete. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any delete(_params?: Record<string, any>): Promise<void>; /** * Searches for documents similar to a given vector query and returns them * with similarity scores. 
* * @param query - A vector representing the query for similarity search. * @param k - The number of similar documents to return. * @param filter - Optional filter based on `FilterType` to restrict results. * @returns A promise that resolves to an array of tuples, each containing a * `DocumentInterface` and its corresponding similarity score. */ similaritySearchVectorWithScore( query: number[], k: number, filter?: this["FilterType"] ): Promise<[DocumentInterface, number][]>; /** * Searches for documents similar to a text query, embedding the query * and retrieving documents based on vector similarity. * * @param query - The text query to search for. * @param k - Optional number of similar documents to return. * @param filter - Optional filter based on `FilterType` to restrict results. * @param callbacks - Optional callbacks for tracking progress or events * during the search process. * @returns A promise that resolves to an array of `DocumentInterface` * instances representing similar documents. */ similaritySearch( query: string, k?: number, filter?: this["FilterType"], callbacks?: Callbacks ): Promise<DocumentInterface[]>; /** * Searches for documents similar to a text query and includes similarity * scores in the result. * * @param query - The text query to search for. * @param k - Optional number of similar documents to return. * @param filter - Optional filter based on `FilterType` to restrict results. * @param callbacks - Optional callbacks for tracking progress or events * during the search process. * @returns A promise that resolves to an array of tuples, each containing * a `DocumentInterface` and its similarity score. */ similaritySearchWithScore( query: string, k?: number, filter?: this["FilterType"], callbacks?: Callbacks ): Promise<[DocumentInterface, number][]>; /** * Return documents selected using the maximal marginal relevance. * Maximal marginal relevance optimizes for similarity to the query AND diversity * among selected documents. 
* * @param {string} query - Text to look up documents similar to. * @param {number} options.k - Number of documents to return. * @param {number} options.fetchK - Number of documents to fetch before passing to the MMR algorithm. * @param {number} options.lambda - Number between 0 and 1 that determines the degree of diversity among the results, * where 0 corresponds to maximum diversity and 1 to minimum diversity. * @param {this["FilterType"]} options.filter - Optional filter * @param _callbacks * * @returns {Promise<DocumentInterface[]>} - List of documents selected by maximal marginal relevance. */ maxMarginalRelevanceSearch?( query: string, options: MaxMarginalRelevanceSearchOptions<this["FilterType"]>, callbacks: Callbacks | undefined ): Promise<DocumentInterface[]>; /** * Converts the vector store into a retriever, making it suitable for use in * retrieval-based workflows and allowing additional configuration. * * @param kOrFields - Optional parameter for specifying either the number of * documents to retrieve or partial retriever configurations. * @param filter - Optional filter based on `FilterType` for retrieval restriction. * @param callbacks - Optional callbacks for tracking retrieval events or progress. * @param tags - General-purpose tags to add contextual information to the retriever. * @param metadata - General-purpose metadata providing additional context * for retrieval. * @param verbose - If `true`, enables detailed logging during retrieval. * @returns An instance of `VectorStoreRetriever` configured with the specified options. */ asRetriever( kOrFields?: number | Partial<VectorStoreRetrieverInput<this>>, filter?: this["FilterType"], callbacks?: Callbacks, tags?: string[], metadata?: Record<string, unknown>, verbose?: boolean ): VectorStoreRetriever<this>; } /** * Abstract class representing a vector storage system for performing * similarity searches on embedded documents. 
* * `VectorStore` provides methods for adding precomputed vectors or documents, * removing documents based on criteria, and performing similarity searches * with optional scoring. Subclasses are responsible for implementing specific * storage mechanisms and the exact behavior of certain abstract methods. * * @abstract * @extends Serializable * @implements VectorStoreInterface */ export abstract class VectorStore extends Serializable implements VectorStoreInterface { declare FilterType: object | string; /** * Namespace within LangChain to uniquely identify this vector store's * location, based on the vector store type. * * @internal */ // Only ever instantiated in main LangChain lc_namespace = ["langchain", "vectorstores", this._vectorstoreType()]; /** * Embeddings interface for generating vector embeddings from text queries, * enabling vector-based similarity searches. */ embeddings: EmbeddingsInterface; /** * Initializes a new vector store with embeddings and database configuration. * * @param embeddings - Instance of `EmbeddingsInterface` used to embed queries. * @param dbConfig - Configuration settings for the database or storage system. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any constructor(embeddings: EmbeddingsInterface, dbConfig: Record<string, any>) { super(dbConfig); this.embeddings = embeddings; } /** * Returns a string representing the type of vector store, which subclasses * must implement to identify their specific vector storage type. * * @returns {string} A string indicating the vector store type. * @abstract */ abstract _vectorstoreType(): string; /** * Adds precomputed vectors and corresponding documents to the vector store. * * @param vectors - An array of vectors representing each document. * @param documents - Array of documents associated with each vector. * @param options - Optional configuration for adding vectors, such as indexing. 
* @returns A promise resolving to an array of document IDs or void, based on implementation. * @abstract */ abstract addVectors( vectors: number[][], documents: DocumentInterface[], options?: AddDocumentOptions ): Promise<string[] | void>; /** * Adds documents to the vector store, embedding them first through the * `embeddings` instance. * * @param documents - Array of documents to embed and add. * @param options - Optional configuration for embedding and storing documents. * @returns A promise resolving to an array of document IDs or void, based on implementation. * @abstract */ abstract addDocuments( documents: DocumentInterface[], options?: AddDocumentOptions ): Promise<string[] | void>; /** * Deletes documents from the vector store based on the specified parameters. * * @param _params - Flexible key-value pairs defining conditions for document deletion. * @returns A promise that resolves once the deletion is complete. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any async delete(_params?: Record<string, any>): Promise<void> { throw new Error("Not implemented."); } /** * Performs a similarity search using a vector query and returns results * along with their similarity scores. * * @param query - Vector representing the search query. * @param k - Number of similar results to return. * @param filter - Optional filter based on `FilterType` to restrict results. * @returns A promise resolving to an array of tuples containing documents and their similarity scores. * @abstract */ abstract similaritySearchVectorWithScore( query: number[], k: number, filter?: this["FilterType"] ): Promise<[DocumentInterface, number][]>; /** * Searches for documents similar to a text query by embedding the query and * performing a similarity search on the resulting vector. * * @param query - Text query for finding similar documents. * @param k - Number of similar results to return. Defaults to 4. * @param filter - Optional filter based on `FilterType`. 
* @param _callbacks - Optional callbacks for monitoring search progress * @returns A promise resolving to an array of `DocumentInterface` instances representing similar documents. */ async similaritySearch( query: string, k = 4, filter: this["FilterType"] | undefined = undefined, _callbacks: Callbacks | undefined = undefined // implement passing to embedQuery later ): Promise<DocumentInterface[]> { const results = await this.similaritySearchVectorWithScore( await this.embeddings.embedQuery(query), k, filter ); return results.map((result) => result[0]); } /** * Searches for documents similar to a text query by embedding the query, * and returns results with similarity scores. * * @param query - Text query for finding similar documents. * @param k - Number of similar results to return. Defaults to 4. * @param filter - Optional filter based on `FilterType`. * @param _callbacks - Optional callbacks for monitoring search progress * @returns A promise resolving to an array of tuples, each containing a * document and its similarity score. */ async similaritySearchWithScore( query: string, k = 4, filter: this["FilterType"] | undefined = undefined, _callbacks: Callbacks | undefined = undefined // implement passing to embedQuery later ): Promise<[DocumentInterface, number][]> { return this.similaritySearchVectorWithScore( await this.embeddings.embedQuery(query), k, filter ); } /** * Return documents selected using the maximal marginal relevance. * Maximal marginal relevance optimizes for similarity to the query AND diversity * among selected documents. * * @param {string} query - Text to look up documents similar to. * @param {number} options.k - Number of documents to return. * @param {number} options.fetchK - Number of documents to fetch before passing to the MMR algorithm. * @param {number} options.lambda - Number between 0 and 1 that determines the degree of diversity among the results, * where 0 corresponds to maximum diversity and 1 to minimum diversity. 
* @param {this["FilterType"]} options.filter - Optional filter * @param _callbacks * * @returns {Promise<DocumentInterface[]>} - List of documents selected by maximal marginal relevance. */ async maxMarginalRelevanceSearch?( query: string, options: MaxMarginalRelevanceSearchOptions<this["FilterType"]>, _callbacks: Callbacks | undefined // implement passing to embedQuery later ): Promise<DocumentInterface[]>; /** * Creates a `VectorStore` instance from an array of text strings and optional * metadata, using the specified embeddings and database configuration. * * Subclasses must implement this method to define how text and metadata * are embedded and stored in the vector store. Throws an error if not overridden. * * @param _texts - Array of strings representing the text documents to be stored. * @param _metadatas - Metadata for the texts, either as an array (one for each text) * or a single object (applied to all texts). * @param _embeddings - Instance of `EmbeddingsInterface` to embed the texts. * @param _dbConfig - Database configuration settings. * @returns A promise that resolves to a new `VectorStore` instance. * @throws {Error} Throws an error if this method is not overridden by a subclass. */ static fromTexts( _texts: string[], _metadatas: object[] | object, _embeddings: EmbeddingsInterface, // eslint-disable-next-line @typescript-eslint/no-explicit-any _dbConfig: Record<string, any> ): Promise<VectorStore> { throw new Error( "the Langchain vectorstore implementation you are using forgot to override this, please report a bug" ); } /** * Creates a `VectorStore` instance from an array of documents, using the specified * embeddings and database configuration. * * Subclasses must implement this method to define how documents are embedded * and stored. Throws an error if not overridden. * * @param _docs - Array of `DocumentInterface` instances representing the documents to be stored. * @param _embeddings - Instance of `EmbeddingsInterface` to embed the documents. 
* @param _dbConfig - Database configuration settings. * @returns A promise that resolves to a new `VectorStore` instance. * @throws {Error} Throws an error if this method is not overridden by a subclass. */ static fromDocuments( _docs: DocumentInterface[], _embeddings: EmbeddingsInterface, // eslint-disable-next-line @typescript-eslint/no-explicit-any _dbConfig: Record<string, any> ): Promise<VectorStore> { throw new Error( "the Langchain vectorstore implementation you are using forgot to override this, please report a bug" ); } /** * Creates a `VectorStoreRetriever` instance with flexible configuration options. * * @param kOrFields * - If a number is provided, it sets the `k` parameter (number of items to retrieve). * - If an object is provided, it should contain various configuration options. * @param filter * - Optional filter criteria to limit the items retrieved based on the specified filter type. * @param callbacks * - Optional callbacks that may be triggered at specific stages of the retrieval process. * @param tags * - Tags to categorize or label the `VectorStoreRetriever`. Defaults to an empty array if not provided. * @param metadata * - Additional metadata as key-value pairs to add contextual information for the retrieval process. * @param verbose * - If `true`, enables detailed logging for the retrieval process. Defaults to `false`. * * @returns * - A configured `VectorStoreRetriever` instance based on the provided parameters. 
* * @example * Basic usage with a `k` value: * ```typescript * const retriever = myVectorStore.asRetriever(5); * ``` * * Usage with a configuration object: * ```typescript * const retriever = myVectorStore.asRetriever({ * k: 10, * filter: myFilter, * tags: ['example', 'test'], * verbose: true, * searchType: 'mmr', * searchKwargs: { alpha: 0.5 }, * }); * ``` */ asRetriever( kOrFields?: number | Partial<VectorStoreRetrieverInput<this>>, filter?: this["FilterType"], callbacks?: Callbacks, tags?: string[], metadata?: Record<string, unknown>, verbose?: boolean ): VectorStoreRetriever<this> { if (typeof kOrFields === "number") { return new VectorStoreRetriever({ vectorStore: this, k: kOrFields, filter, tags: [...(tags ?? []), this._vectorstoreType()], metadata, verbose, callbacks, }); } else { const params = { vectorStore: this, k: kOrFields?.k, filter: kOrFields?.filter, tags: [...(kOrFields?.tags ?? []), this._vectorstoreType()], metadata: kOrFields?.metadata, verbose: kOrFields?.verbose, callbacks: kOrFields?.callbacks, searchType: kOrFields?.searchType, }; if (kOrFields?.searchType === "mmr") { return new VectorStoreRetriever({ ...params, searchKwargs: kOrFields.searchKwargs, }); } return new VectorStoreRetriever({ ...params }); } } } /** * Abstract class extending `VectorStore` that defines a contract for saving * and loading vector store instances. * * The `SaveableVectorStore` class allows vector store implementations to * persist their data and retrieve it when needed.The format for saving and * loading data is left to the implementing subclass. * * Subclasses must implement the `save` method to handle their custom * serialization logic, while the `load` method enables reconstruction of a * vector store from saved data, requiring compatible embeddings through the * `EmbeddingsInterface`. 
* * @abstract * @extends VectorStore */ export abstract class SaveableVectorStore extends VectorStore { /** * Saves the current state of the vector store to the specified directory. * * This method must be implemented by subclasses to define their own * serialization process for persisting vector data. The implementation * determines the structure and format of the saved data. * * @param directory - The directory path where the vector store data * will be saved. * @abstract */ abstract save(directory: string): Promise<void>; /** * Loads a vector store instance from the specified directory, using the * provided embeddings to ensure compatibility. * * This static method reconstructs a `SaveableVectorStore` from previously * saved data. Implementations should interpret the saved data format to * recreate the vector store instance. * * @param _directory - The directory path from which the vector store * data will be loaded. * @param _embeddings - An instance of `EmbeddingsInterface` to align * the embeddings with the loaded vector data. * @returns A promise that resolves to a `SaveableVectorStore` instance * constructed from the saved data. */ static load( _directory: string, _embeddings: EmbeddingsInterface ): Promise<SaveableVectorStore> { throw new Error("Not implemented"); } }
0
lc_public_repos/langchainjs/langchain-core
lc_public_repos/langchainjs/langchain-core/src/chat_history.ts
import { Serializable } from "./load/serializable.js";
import { type BaseMessage, HumanMessage, AIMessage } from "./messages/index.js";

// TODO: Combine into one class for 0.2

/**
 * Base class for all chat message histories. All chat message histories
 * should extend this class.
 */
export abstract class BaseChatMessageHistory extends Serializable {
  /** Returns all messages currently held by the store. */
  public abstract getMessages(): Promise<BaseMessage[]>;

  /** Appends a single message object to the store. */
  public abstract addMessage(message: BaseMessage): Promise<void>;

  /** Convenience: wraps the string in a human message and stores it. */
  public abstract addUserMessage(message: string): Promise<void>;

  /** Convenience: wraps the string in an AI message and stores it. */
  public abstract addAIChatMessage(message: string): Promise<void>;

  /**
   * Add a list of messages.
   *
   * Implementations should override this method to handle bulk addition of messages
   * in an efficient manner to avoid unnecessary round-trips to the underlying store.
   *
   * @param messages - A list of BaseMessage objects to store.
   */
  public async addMessages(messages: BaseMessage[]): Promise<void> {
    // Default implementation: one round-trip per message.
    for (const message of messages) {
      await this.addMessage(message);
    }
  }

  /** Removes all messages from the store. */
  public abstract clear(): Promise<void>;
}

/**
 * Base class for all list chat message histories. All list chat message
 * histories should extend this class.
 */
export abstract class BaseListChatMessageHistory extends Serializable {
  /** Returns a list of messages stored in the store. */
  public abstract getMessages(): Promise<BaseMessage[]>;

  /**
   * Add a message object to the store.
   */
  public abstract addMessage(message: BaseMessage): Promise<void>;

  /**
   * This is a convenience method for adding a human message string to the store.
   * Please note that this is a convenience method. Code should favor the
   * bulk addMessages interface instead to save on round-trips to the underlying
   * persistence layer.
   * This method may be deprecated in a future release.
   */
  public addUserMessage(message: string): Promise<void> {
    return this.addMessage(new HumanMessage(message));
  }

  /** @deprecated Use addAIMessage instead */
  public addAIChatMessage(message: string): Promise<void> {
    return this.addMessage(new AIMessage(message));
  }

  /**
   * This is a convenience method for adding an AI message string to the store.
   * Please note that this is a convenience method. Code should favor the bulk
   * addMessages interface instead to save on round-trips to the underlying
   * persistence layer.
   * This method may be deprecated in a future release.
   */
  public addAIMessage(message: string): Promise<void> {
    return this.addMessage(new AIMessage(message));
  }

  /**
   * Add a list of messages.
   *
   * Implementations should override this method to handle bulk addition of messages
   * in an efficient manner to avoid unnecessary round-trips to the underlying store.
   *
   * @param messages - A list of BaseMessage objects to store.
   */
  public async addMessages(messages: BaseMessage[]): Promise<void> {
    // Default implementation: one round-trip per message.
    for (const message of messages) {
      await this.addMessage(message);
    }
  }

  /**
   * Remove all messages from the store.
   *
   * Optional for subclasses: the base implementation always throws.
   */
  public clear(): Promise<void> {
    throw new Error("Not implemented.");
  }
}

/**
 * Class for storing chat message history in-memory. It extends the
 * BaseListChatMessageHistory class and provides methods to get, add, and
 * clear messages.
 */
export class InMemoryChatMessageHistory extends BaseListChatMessageHistory {
  lc_namespace = ["langchain", "stores", "message", "in_memory"];

  // Backing storage; lives only as long as this instance.
  private messages: BaseMessage[] = [];

  constructor(messages?: BaseMessage[]) {
    // Forwards the raw `arguments` object so the Serializable base class sees
    // the original constructor arguments — NOTE(review): presumably needed for
    // serialization round-trips; confirm before replacing with `super()`.
    super(...arguments);
    this.messages = messages ?? [];
  }

  /**
   * Method to get all the messages stored in the ChatMessageHistory
   * instance.
   *
   * Returns the internal array by reference, not a copy.
   *
   * @returns Array of stored BaseMessage instances.
   */
  async getMessages(): Promise<BaseMessage[]> {
    return this.messages;
  }

  /**
   * Method to add a new message to the ChatMessageHistory instance.
   * @param message The BaseMessage instance to add.
   * @returns A promise that resolves when the message has been added.
   */
  async addMessage(message: BaseMessage) {
    this.messages.push(message);
  }

  /**
   * Method to clear all the messages from the ChatMessageHistory instance.
   * @returns A promise that resolves when all messages have been cleared.
   */
  async clear() {
    // Replace rather than truncate so earlier getMessages() callers keep
    // their snapshot of the old array.
    this.messages = [];
  }
}
0
lc_public_repos/langchainjs/langchain-core
lc_public_repos/langchainjs/langchain-core/src/memory.ts
/** * Type alias for a record where the keys are strings and the values can * be any type. This is used to represent the input values for a Chain. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any export type InputValues = Record<string, any>; /** * Type alias for a record where the keys are strings and the values can * be any type. This is used to represent the output values from a Chain. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any export type OutputValues = Record<string, any>; /** * Type alias for a record where the keys are strings and the values can * be any type. This is used to represent the memory variables in a Chain. */ // eslint-disable-next-line @typescript-eslint/no-explicit-any export type MemoryVariables = Record<string, any>; /** * Abstract base class for memory in LangChain's Chains. Memory refers to * the state in Chains. It can be used to store information about past * executions of a Chain and inject that information into the inputs of * future executions of the Chain. */ export abstract class BaseMemory { abstract get memoryKeys(): string[]; /** * Abstract method that should take an object of input values and return a * Promise that resolves with an object of memory variables. The * implementation of this method should load the memory variables from the * provided input values. * @param values An object of input values. * @returns Promise that resolves with an object of memory variables. */ abstract loadMemoryVariables(values: InputValues): Promise<MemoryVariables>; /** * Abstract method that should take two objects, one of input values and * one of output values, and return a Promise that resolves when the * context has been saved. The implementation of this method should save * the context based on the provided input and output values. * @param inputValues An object of input values. * @param outputValues An object of output values. * @returns Promise that resolves when the context has been saved. 
*/ abstract saveContext( inputValues: InputValues, outputValues: OutputValues ): Promise<void>; } const getValue = (values: InputValues | OutputValues, key?: string) => { if (key !== undefined) { return values[key]; } const keys = Object.keys(values); if (keys.length === 1) { return values[keys[0]]; } }; /** * This function is used by memory classes to select the input value * to use for the memory. If there is only one input value, it is used. * If there are multiple input values, the inputKey must be specified. */ export const getInputValue = (inputValues: InputValues, inputKey?: string) => { const value = getValue(inputValues, inputKey); if (!value) { const keys = Object.keys(inputValues); throw new Error( `input values have ${keys.length} keys, you must specify an input key or pass only 1 key as input` ); } return value; }; /** * This function is used by memory classes to select the output value * to use for the memory. If there is only one output value, it is used. * If there are multiple output values, the outputKey must be specified. * If no outputKey is specified, an error is thrown. */ export const getOutputValue = ( outputValues: OutputValues, outputKey?: string ) => { const value = getValue(outputValues, outputKey); if (!value && value !== "") { const keys = Object.keys(outputValues); throw new Error( `output values have ${keys.length} keys, you must specify an output key or pass only 1 key as output` ); } return value; }; /** * Function used by memory classes to get the key of the prompt input, * excluding any keys that are memory variables or the "stop" key. If * there is not exactly one prompt input key, an error is thrown. 
*/ export function getPromptInputKey( inputs: Record<string, unknown>, memoryVariables: string[] ): string { const promptInputKeys = Object.keys(inputs).filter( (key) => !memoryVariables.includes(key) && key !== "stop" ); if (promptInputKeys.length !== 1) { throw new Error( `One input key expected, but got ${promptInputKeys.length}` ); } return promptInputKeys[0]; }
0
lc_public_repos/langchainjs/langchain-core
lc_public_repos/langchainjs/langchain-core/src/prompt_values.ts
import { Serializable } from "./load/serializable.js";
import { type BaseMessage } from "./messages/base.js";
import { HumanMessage } from "./messages/human.js";
import { getBufferString } from "./messages/utils.js";

/** Contract shared by every prompt value: render as text or as chat messages. */
export interface BasePromptValueInterface extends Serializable {
  toString(): string;

  toChatMessages(): BaseMessage[];
}

export interface StringPromptValueInterface extends BasePromptValueInterface {
  value: string;
}

export interface ChatPromptValueInterface extends BasePromptValueInterface {
  messages: BaseMessage[];
}

/**
 * Base PromptValue class. All prompt values should extend this class.
 */
export abstract class BasePromptValue
  extends Serializable
  implements BasePromptValueInterface
{
  abstract toString(): string;

  abstract toChatMessages(): BaseMessage[];
}

/**
 * Represents a prompt value as a string. It extends the BasePromptValue
 * class and overrides the toString and toChatMessages methods.
 */
export class StringPromptValue
  extends BasePromptValue
  implements StringPromptValueInterface
{
  static lc_name(): string {
    return "StringPromptValue";
  }

  lc_namespace = ["langchain_core", "prompt_values"];

  lc_serializable = true;

  value: string;

  constructor(value: string) {
    // Pass a kwargs-shaped object to Serializable so the value is captured
    // for serialization.
    super({ value });
    this.value = value;
  }

  toString() {
    return this.value;
  }

  /** A string prompt becomes a single human message when used with chat models. */
  toChatMessages() {
    return [new HumanMessage(this.value)];
  }
}

/**
 * Interface for the fields of a ChatPromptValue.
 */
export interface ChatPromptValueFields {
  messages: BaseMessage[];
}

/**
 * Class that represents a chat prompt value. It extends the
 * BasePromptValue and includes an array of BaseMessage instances.
 */
export class ChatPromptValue
  extends BasePromptValue
  implements ChatPromptValueInterface
{
  lc_namespace = ["langchain_core", "prompt_values"];

  lc_serializable = true;

  static lc_name() {
    return "ChatPromptValue";
  }

  messages: BaseMessage[];

  constructor(messages: BaseMessage[]);

  constructor(fields: ChatPromptValueFields);

  constructor(fields: BaseMessage[] | ChatPromptValueFields) {
    // Normalize the bare-array overload into the fields-object shape before
    // handing it to Serializable.
    if (Array.isArray(fields)) {
      // eslint-disable-next-line no-param-reassign
      fields = { messages: fields };
    }
    super(fields);
    this.messages = fields.messages;
  }

  toString() {
    return getBufferString(this.messages);
  }

  toChatMessages() {
    return this.messages;
  }
}

export type ImageContent = {
  /** Specifies the detail level of the image. */
  detail?: "auto" | "low" | "high";

  /** Either a URL of the image or the base64 encoded image data. */
  url: string;
};

export interface ImagePromptValueFields {
  imageUrl: ImageContent;
}

/**
 * Class that represents an image prompt value. It extends the
 * BasePromptValue and includes an ImageURL instance.
 */
export class ImagePromptValue extends BasePromptValue {
  lc_namespace = ["langchain_core", "prompt_values"];

  lc_serializable = true;

  static lc_name() {
    return "ImagePromptValue";
  }

  imageUrl: ImageContent;

  /** @ignore Declared for interface symmetry; never assigned here. */
  value: string;

  constructor(fields: ImagePromptValueFields);

  constructor(fields: ImageContent);

  constructor(fields: ImageContent | ImagePromptValueFields) {
    // Normalize the bare ImageContent overload into the fields-object shape.
    if (!("imageUrl" in fields)) {
      // eslint-disable-next-line no-param-reassign
      fields = { imageUrl: fields };
    }
    super(fields);
    this.imageUrl = fields.imageUrl;
  }

  toString() {
    return this.imageUrl.url;
  }

  /** Wraps the image in a single human message with image_url content. */
  toChatMessages() {
    return [
      new HumanMessage({
        content: [
          {
            type: "image_url",
            image_url: {
              detail: this.imageUrl.detail,
              url: this.imageUrl.url,
            },
          },
        ],
      }),
    ];
  }
}
0
lc_public_repos/langchainjs/langchain-core
lc_public_repos/langchainjs/langchain-core/src/embeddings.ts
import { AsyncCaller, AsyncCallerParams } from "./utils/async_caller.js";

/**
 * The parameters required to initialize an instance of the Embeddings
 * class.
 */
export type EmbeddingsParams = AsyncCallerParams;

export interface EmbeddingsInterface {
  /**
   * An abstract method that takes an array of documents as input and
   * returns a promise that resolves to an array of vectors for each
   * document.
   * @param documents An array of documents to be embedded.
   * @returns A promise that resolves to an array of vectors for each document.
   */
  embedDocuments(documents: string[]): Promise<number[][]>;

  /**
   * An abstract method that takes a single document as input and returns a
   * promise that resolves to a vector for the query document.
   * @param document A single document to be embedded.
   * @returns A promise that resolves to a vector for the query document.
   */
  embedQuery(document: string): Promise<number[]>;
}

/**
 * An abstract class that provides methods for embedding documents and
 * queries using LangChain.
 */
export abstract class Embeddings implements EmbeddingsInterface {
  /**
   * The async caller should be used by subclasses to make any async calls,
   * which will thus benefit from the concurrency and retry logic.
   */
  caller: AsyncCaller;

  /**
   * @param params Optional caller configuration (concurrency, retries, …).
   * Declared optional to match the runtime guard below: the body already
   * defended with `params ?? {}`, so the previously-required parameter type
   * contradicted the implementation. Existing callers are unaffected.
   */
  constructor(params?: EmbeddingsParams) {
    this.caller = new AsyncCaller(params ?? {});
  }

  /**
   * An abstract method that takes an array of documents as input and
   * returns a promise that resolves to an array of vectors for each
   * document.
   * @param documents An array of documents to be embedded.
   * @returns A promise that resolves to an array of vectors for each document.
   */
  abstract embedDocuments(documents: string[]): Promise<number[][]>;

  /**
   * An abstract method that takes a single document as input and returns a
   * promise that resolves to a vector for the query document.
   * @param document A single document to be embedded.
   * @returns A promise that resolves to a vector for the query document.
   */
  abstract embedQuery(document: string): Promise<number[]>;
}
0
lc_public_repos/langchainjs/langchain-core
lc_public_repos/langchainjs/langchain-core/src/context.ts
/* __LC_ALLOW_ENTRYPOINT_SIDE_EFFECTS__ */
import { AsyncLocalStorage } from "node:async_hooks";
import { RunTree } from "langsmith";
import { isRunTree } from "langsmith/run_trees";
import {
  _CONTEXT_VARIABLES_KEY,
  AsyncLocalStorageProviderSingleton,
} from "./singletons/index.js";

// Module-load side effect (deliberate — see the marker comment above):
// installs a real AsyncLocalStorage as the global ALS provider so context
// variables work in this environment.
AsyncLocalStorageProviderSingleton.initializeGlobalInstance(
  new AsyncLocalStorage()
);

/**
 * Set a context variable. Context variables are scoped to any
 * child runnables called by the current runnable, or globally if set outside
 * of any runnable.
 *
 * @remarks
 * This function is only supported in environments that support AsyncLocalStorage,
 * including Node.js, Deno, and Cloudflare Workers.
 *
 * @example
 * ```ts
 * import { RunnableLambda } from "@langchain/core/runnables";
 * import {
 *   getContextVariable,
 *   setContextVariable
 * } from "@langchain/core/context";
 *
 * const nested = RunnableLambda.from(() => {
 *   // "bar" because it was set by a parent
 *   console.log(getContextVariable("foo"));
 *
 *   // Override to "baz", but only for child runnables
 *   setContextVariable("foo", "baz");
 *
 *   // Now "baz", but only for child runnables
 *   return getContextVariable("foo");
 * });
 *
 * const runnable = RunnableLambda.from(async () => {
 *   // Set a context variable named "foo"
 *   setContextVariable("foo", "bar");
 *
 *   const res = await nested.invoke({});
 *
 *   // Still "bar" since child changes do not affect parents
 *   console.log(getContextVariable("foo"));
 *
 *   return res;
 * });
 *
 * // undefined, because context variable has not been set yet
 * console.log(getContextVariable("foo"));
 *
 * // Final return value is "baz"
 * const result = await runnable.invoke({});
 * ```
 *
 * @param name The name of the context variable.
 * @param value The value to set.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export function setContextVariable<T>(name: PropertyKey, value: T): void {
  // Copy-on-write: clone the current variable map so parent scopes are not
  // mutated when a child overrides a variable.
  const runTree = AsyncLocalStorageProviderSingleton.getInstance().getStore();
  const contextVars = { ...runTree?.[_CONTEXT_VARIABLES_KEY] };
  contextVars[name] = value;
  let newValue = {};
  if (isRunTree(runTree)) {
    // Preserve the existing run tree (tracing state) on the new store value.
    newValue = new RunTree(runTree);
  }
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  (newValue as any)[_CONTEXT_VARIABLES_KEY] = contextVars;
  // enterWith scopes the new store to the current async context and its
  // children only.
  AsyncLocalStorageProviderSingleton.getInstance().enterWith(newValue);
}

/**
 * Get the value of a previously set context variable. Context variables
 * are scoped to any child runnables called by the current runnable,
 * or globally if set outside of any runnable.
 *
 * @remarks
 * This function is only supported in environments that support AsyncLocalStorage,
 * including Node.js, Deno, and Cloudflare Workers.
 *
 * @example
 * ```ts
 * import { RunnableLambda } from "@langchain/core/runnables";
 * import {
 *   getContextVariable,
 *   setContextVariable
 * } from "@langchain/core/context";
 *
 * const nested = RunnableLambda.from(() => {
 *   // "bar" because it was set by a parent
 *   console.log(getContextVariable("foo"));
 *
 *   // Override to "baz", but only for child runnables
 *   setContextVariable("foo", "baz");
 *
 *   // Now "baz", but only for child runnables
 *   return getContextVariable("foo");
 * });
 *
 * const runnable = RunnableLambda.from(async () => {
 *   // Set a context variable named "foo"
 *   setContextVariable("foo", "bar");
 *
 *   const res = await nested.invoke({});
 *
 *   // Still "bar" since child changes do not affect parents
 *   console.log(getContextVariable("foo"));
 *
 *   return res;
 * });
 *
 * // undefined, because context variable has not been set yet
 * console.log(getContextVariable("foo"));
 *
 * // Final return value is "baz"
 * const result = await runnable.invoke({});
 * ```
 *
 * @param name The name of the context variable.
 * @returns The stored value, or `undefined` when the variable was never set
 * in this scope.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export function getContextVariable<T = any>(name: PropertyKey): T | undefined {
  const runTree = AsyncLocalStorageProviderSingleton.getInstance().getStore();
  return runTree?.[_CONTEXT_VARIABLES_KEY]?.[name];
}
0
lc_public_repos/langchainjs/langchain-core
lc_public_repos/langchainjs/langchain-core/src/outputs.ts
import { type BaseMessage, type BaseMessageChunk } from "./messages/base.js"; export const RUN_KEY = "__run"; /** * Output of a single generation. */ export interface Generation { /** * Generated text output */ text: string; /** * Raw generation info response from the provider. * May include things like reason for finishing (e.g. in {@link OpenAI}) */ // eslint-disable-next-line @typescript-eslint/no-explicit-any generationInfo?: Record<string, any>; } export type GenerationChunkFields = { text: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any generationInfo?: Record<string, any>; }; /** * Chunk of a single generation. Used for streaming. */ export class GenerationChunk implements Generation { public text: string; // eslint-disable-next-line @typescript-eslint/no-explicit-any public generationInfo?: Record<string, any>; constructor(fields: GenerationChunkFields) { this.text = fields.text; this.generationInfo = fields.generationInfo; } concat(chunk: GenerationChunk): GenerationChunk { return new GenerationChunk({ text: this.text + chunk.text, generationInfo: { ...this.generationInfo, ...chunk.generationInfo, }, }); } } /** * Contains all relevant information returned by an LLM. */ export type LLMResult = { /** * List of the things generated. Each input could have multiple {@link Generation | generations}, hence this is a list of lists. */ generations: Generation[][]; /** * Dictionary of arbitrary LLM-provider specific output. 
*/ // eslint-disable-next-line @typescript-eslint/no-explicit-any llmOutput?: Record<string, any>; /** * Dictionary of run metadata */ // eslint-disable-next-line @typescript-eslint/no-explicit-any [RUN_KEY]?: Record<string, any>; }; export interface ChatGeneration extends Generation { message: BaseMessage; } export type ChatGenerationChunkFields = GenerationChunkFields & { message: BaseMessageChunk; }; export class ChatGenerationChunk extends GenerationChunk implements ChatGeneration { public message: BaseMessageChunk; constructor(fields: ChatGenerationChunkFields) { super(fields); this.message = fields.message; } concat(chunk: ChatGenerationChunk) { return new ChatGenerationChunk({ text: this.text + chunk.text, generationInfo: { ...this.generationInfo, ...chunk.generationInfo, }, message: this.message.concat(chunk.message), }); } } export interface ChatResult { generations: ChatGeneration[]; // eslint-disable-next-line @typescript-eslint/no-explicit-any llmOutput?: Record<string, any>; }
0
lc_public_repos/langchainjs/langchain-core
lc_public_repos/langchainjs/langchain-core/src/stores.ts
import { Serializable } from "./load/serializable.js"; /** @deprecated For backwards compatibility only. Remove on next minor version upgrade. */ export interface BaseStoreInterface<K, V> { /** * Method to get multiple values for a set of keys. * @param {K[]} keys - An array of keys. * @returns {Promise<(V | undefined)[]>} - A Promise that resolves with array of values or undefined if key not found. */ mget(keys: K[]): Promise<(V | undefined)[]>; /** * Method to set a value for multiple keys. * @param {[K, V][]} keyValuePairs - An array of key-value pairs. * @returns {Promise<void>} - A Promise that resolves when the operation is complete. */ mset(keyValuePairs: [K, V][]): Promise<void>; /** * Method to delete multiple keys. * @param {K[]} keys - An array of keys to delete. * @returns {Promise<void>} - A Promise that resolves when the operation is complete. */ mdelete(keys: K[]): Promise<void>; /** * Method to yield keys optionally based on a prefix. * @param {string} prefix - Optional prefix to filter keys. * @returns {AsyncGenerator<K | string>} - An asynchronous generator that yields keys on iteration. */ yieldKeys(prefix?: string): AsyncGenerator<K | string>; } /** * Abstract interface for a key-value store. */ export abstract class BaseStore<K, V> extends Serializable implements BaseStoreInterface<K, V> { /** * Abstract method to get multiple values for a set of keys. * @param {K[]} keys - An array of keys. * @returns {Promise<(V | undefined)[]>} - A Promise that resolves with array of values or undefined if key not found. */ abstract mget(keys: K[]): Promise<(V | undefined)[]>; /** * Abstract method to set a value for multiple keys. * @param {[K, V][]} keyValuePairs - An array of key-value pairs. * @returns {Promise<void>} - A Promise that resolves when the operation is complete. */ abstract mset(keyValuePairs: [K, V][]): Promise<void>; /** * Abstract method to delete multiple keys. * @param {K[]} keys - An array of keys to delete. 
* @returns {Promise<void>} - A Promise that resolves when the operation is complete. */ abstract mdelete(keys: K[]): Promise<void>; /** * Abstract method to yield keys optionally based on a prefix. * @param {string} prefix - Optional prefix to filter keys. * @returns {AsyncGenerator<K | string>} - An asynchronous generator that yields keys on iteration. */ abstract yieldKeys(prefix?: string): AsyncGenerator<K | string>; } /** * In-memory implementation of the BaseStore using a dictionary. Used for * storing key-value pairs in memory. * @example * ```typescript * const store = new InMemoryStore<BaseMessage>(); * await store.mset( * Array.from({ length: 5 }).map((_, index) => [ * `message:id:${index}`, * index % 2 === 0 * ? new AIMessage("ai stuff...") * : new HumanMessage("human stuff..."), * ]), * ); * * const retrievedMessages = await store.mget(["message:id:0", "message:id:1"]); * await store.mdelete(await store.yieldKeys("message:id:").toArray()); * ``` */ // eslint-disable-next-line @typescript-eslint/no-explicit-any export class InMemoryStore<T = any> extends BaseStore<string, T> { lc_namespace = ["langchain", "storage"]; protected store: Record<string, T> = {}; /** * Retrieves the values associated with the given keys from the store. * @param keys Keys to retrieve values for. * @returns Array of values associated with the given keys. */ async mget(keys: string[]) { return keys.map((key) => this.store[key]); } /** * Sets the values for the given keys in the store. * @param keyValuePairs Array of key-value pairs to set in the store. * @returns Promise that resolves when all key-value pairs have been set. */ async mset(keyValuePairs: [string, T][]): Promise<void> { for (const [key, value] of keyValuePairs) { this.store[key] = value; } } /** * Deletes the given keys and their associated values from the store. * @param keys Keys to delete from the store. * @returns Promise that resolves when all keys have been deleted. 
*/ async mdelete(keys: string[]): Promise<void> { for (const key of keys) { delete this.store[key]; } } /** * Asynchronous generator that yields keys from the store. If a prefix is * provided, it only yields keys that start with the prefix. * @param prefix Optional prefix to filter keys. * @returns AsyncGenerator that yields keys from the store. */ async *yieldKeys(prefix?: string | undefined): AsyncGenerator<string> { const keys = Object.keys(this.store); for (const key of keys) { if (prefix === undefined || key.startsWith(prefix)) { yield key; } } } }
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tracers/tracer_langchain_v1.ts
import type { ChainValues } from "../utils/types/index.js";
import { type BaseMessage } from "../messages/base.js";
import { getBufferString } from "../messages/utils.js";
import type { LLMResult } from "../outputs.js";
import { getEnvironmentVariable } from "../utils/env.js";

import { BaseTracer, type RunType, type Run } from "./base.js";

/** Common fields shared by all V1-format run records. */
export interface BaseRunV1 {
  uuid: string;
  parent_uuid?: string;
  start_time: number;
  end_time?: number;
  execution_order: number;
  child_execution_order: number;
  serialized: { name: string };
  session_id: number;
  error?: string;
  type: RunType;
}

/** V1 record for an LLM call: prompts in, LLMResult out. */
export interface LLMRun extends BaseRunV1 {
  prompts: string[];
  response?: LLMResult;
}

/** V1 record for a chain run, with child runs grouped by kind. */
export interface ChainRun extends BaseRunV1 {
  inputs: ChainValues;
  outputs?: ChainValues;
  child_llm_runs: LLMRun[];
  child_chain_runs: ChainRun[];
  child_tool_runs: ToolRun[];
}

/** V1 record for a tool run, with child runs grouped by kind. */
export interface ToolRun extends BaseRunV1 {
  tool_input: string;
  output?: string;
  action: string;
  child_llm_runs: LLMRun[];
  child_chain_runs: ChainRun[];
  child_tool_runs: ToolRun[];
}

export interface BaseTracerSession {
  start_time: number;
  name?: string;
}

export type TracerSessionCreate = BaseTracerSession;

export interface TracerSessionV1 extends BaseTracerSession {
  id: number;
}

/** @deprecated Use LangChainTracer instead.
 */
export class LangChainTracerV1 extends BaseTracer {
  name = "langchain_tracer";

  // Defaults to a local tracing server when LANGCHAIN_ENDPOINT is unset.
  protected endpoint =
    getEnvironmentVariable("LANGCHAIN_ENDPOINT") || "http://localhost:1984";

  protected headers: Record<string, string> = {
    "Content-Type": "application/json",
  };

  // Assigned lazily by newSession/loadSession/_handleSessionResponse.
  protected session: TracerSessionV1;

  constructor() {
    super();
    const apiKey = getEnvironmentVariable("LANGCHAIN_API_KEY");
    if (apiKey) {
      this.headers["x-api-key"] = apiKey;
    }
  }

  /**
   * Creates a new tracing session on the server and makes it the active
   * session for subsequent persisted runs.
   */
  async newSession(sessionName?: string): Promise<TracerSessionV1> {
    const sessionCreate: TracerSessionCreate = {
      start_time: Date.now(),
      name: sessionName,
    };
    const session = await this.persistSession(sessionCreate);
    this.session = session;
    return session;
  }

  /** Loads (and activates) an existing session by name. */
  async loadSession(sessionName: string): Promise<TracerSessionV1> {
    const endpoint = `${this.endpoint}/sessions?name=${sessionName}`;
    return this._handleSessionResponse(endpoint);
  }

  /** Loads (and activates) the server's "default" session. */
  async loadDefaultSession(): Promise<TracerSessionV1> {
    const endpoint = `${this.endpoint}/sessions?name=default`;
    return this._handleSessionResponse(endpoint);
  }

  /**
   * Recursively converts a V2 `Run` tree into the corresponding V1 record
   * (LLMRun, ChainRun, or ToolRun), partitioning children by run type.
   */
  protected async convertV2RunToRun(
    run: Run
  ): Promise<LLMRun | ChainRun | ToolRun> {
    // Fall back to the default session if none has been activated yet.
    const session = this.session ?? (await this.loadDefaultSession());
    const serialized = run.serialized as { name: string };
    let runResult: LLMRun | ChainRun | ToolRun;
    if (run.run_type === "llm") {
      // Chat-model runs carry `messages` instead of `prompts`; render each
      // message list to a single buffer string for the V1 format.
      const prompts: string[] = run.inputs.prompts
        ? run.inputs.prompts
        : (run.inputs.messages as BaseMessage[][]).map((x) =>
            getBufferString(x)
          );

      const llmRun: LLMRun = {
        uuid: run.id,
        start_time: run.start_time,
        end_time: run.end_time,
        execution_order: run.execution_order,
        child_execution_order: run.child_execution_order,
        serialized,
        type: run.run_type,
        session_id: session.id,
        prompts,
        response: run.outputs as LLMResult,
      };
      runResult = llmRun;
    } else if (run.run_type === "chain") {
      const child_runs = await Promise.all(
        run.child_runs.map((child_run) => this.convertV2RunToRun(child_run))
      );
      const chainRun: ChainRun = {
        uuid: run.id,
        start_time: run.start_time,
        end_time: run.end_time,
        execution_order: run.execution_order,
        child_execution_order: run.child_execution_order,
        serialized,
        type: run.run_type,
        session_id: session.id,
        inputs: run.inputs,
        outputs: run.outputs,
        child_llm_runs: child_runs.filter(
          (child_run) => child_run.type === "llm"
        ) as LLMRun[],
        child_chain_runs: child_runs.filter(
          (child_run) => child_run.type === "chain"
        ) as ChainRun[],
        child_tool_runs: child_runs.filter(
          (child_run) => child_run.type === "tool"
        ) as ToolRun[],
      };

      runResult = chainRun;
    } else if (run.run_type === "tool") {
      const child_runs = await Promise.all(
        run.child_runs.map((child_run) => this.convertV2RunToRun(child_run))
      );
      const toolRun: ToolRun = {
        uuid: run.id,
        start_time: run.start_time,
        end_time: run.end_time,
        execution_order: run.execution_order,
        child_execution_order: run.child_execution_order,
        serialized,
        type: run.run_type,
        session_id: session.id,
        tool_input: run.inputs.input,
        output: run.outputs?.output,
        action: JSON.stringify(serialized),
        child_llm_runs: child_runs.filter(
          (child_run) => child_run.type === "llm"
        ) as LLMRun[],
        child_chain_runs: child_runs.filter(
          (child_run) => child_run.type === "chain"
        ) as ChainRun[],
        child_tool_runs: child_runs.filter(
          (child_run) => child_run.type === "tool"
        ) as ToolRun[],
      };

      runResult = toolRun;
    } else {
      throw new Error(`Unknown run type: ${run.run_type}`);
    }
    return runResult;
  }

  /**
   * Persists a run to the V1 endpoint matching its type. Accepts either a
   * V2 `Run` (converted first) or an already-converted V1 record.
   * Failures are logged, not thrown.
   */
  protected async persistRun(
    run: Run | LLMRun | ChainRun | ToolRun
  ): Promise<void> {
    let endpoint;
    let v1Run: LLMRun | ChainRun | ToolRun;
    // V2 runs are distinguished by the presence of `run_type`.
    if ((run as Run).run_type !== undefined) {
      v1Run = await this.convertV2RunToRun(run as Run);
    } else {
      v1Run = run as LLMRun | ChainRun | ToolRun;
    }
    if (v1Run.type === "llm") {
      endpoint = `${this.endpoint}/llm-runs`;
    } else if (v1Run.type === "chain") {
      endpoint = `${this.endpoint}/chain-runs`;
    } else {
      endpoint = `${this.endpoint}/tool-runs`;
    }

    const response = await fetch(endpoint, {
      method: "POST",
      headers: this.headers,
      body: JSON.stringify(v1Run),
    });
    if (!response.ok) {
      console.error(
        `Failed to persist run: ${response.status} ${response.statusText}`
      );
    }
  }

  /**
   * Creates a session on the server. On failure, logs and falls back to a
   * placeholder session with id 1 so tracing degrades gracefully.
   */
  protected async persistSession(
    sessionCreate: BaseTracerSession
  ): Promise<TracerSessionV1> {
    const endpoint = `${this.endpoint}/sessions`;
    const response = await fetch(endpoint, {
      method: "POST",
      headers: this.headers,
      body: JSON.stringify(sessionCreate),
    });
    if (!response.ok) {
      console.error(
        `Failed to persist session: ${response.status} ${response.statusText}, using default session.`
      );
      return {
        id: 1,
        ...sessionCreate,
      };
    }
    return {
      id: (await response.json()).id,
      ...sessionCreate,
    };
  }

  /**
   * Fetches a session from `endpoint` and activates it. On any failure
   * (HTTP error or empty result) falls back to a placeholder session with
   * id 1 rather than throwing.
   */
  protected async _handleSessionResponse(
    endpoint: string
  ): Promise<TracerSessionV1> {
    const response = await fetch(endpoint, {
      method: "GET",
      headers: this.headers,
    });
    let tracerSession: TracerSessionV1;
    if (!response.ok) {
      console.error(
        `Failed to load session: ${response.status} ${response.statusText}`
      );
      tracerSession = {
        id: 1,
        start_time: Date.now(),
      };
      this.session = tracerSession;
      return tracerSession;
    }
    const resp = (await response.json()) as TracerSessionV1[];
    if (resp.length === 0) {
      tracerSession = {
        id: 1,
        start_time: Date.now(),
      };
      this.session = tracerSession;
      return tracerSession;
    }
    // Use the first matching session returned by the server.
    [tracerSession] = resp;
    this.session = tracerSession;
    return tracerSession;
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tracers/root_listener.ts
import { RunnableConfig } from "../runnables/config.js";
import { BaseTracer, Run } from "./base.js";

/**
 * Tracer that forwards lifecycle events of the *root* run of a run tree
 * to user-supplied listeners. The first run observed is remembered as the
 * root; every nested child run is ignored.
 */
export class RootListenersTracer extends BaseTracer {
  name = "RootListenersTracer";

  /** The Run's ID. Type UUID */
  rootId?: string;

  config: RunnableConfig;

  argOnStart?: (run: Run, config: RunnableConfig) => void | Promise<void>;

  argOnEnd?: (run: Run, config: RunnableConfig) => void | Promise<void>;

  argOnError?: (run: Run, config: RunnableConfig) => void | Promise<void>;

  constructor(fields: {
    config: RunnableConfig;
    onStart?: (run: Run, config: RunnableConfig) => void | Promise<void>;
    onEnd?: (run: Run, config: RunnableConfig) => void | Promise<void>;
    onError?: (run: Run, config: RunnableConfig) => void | Promise<void>;
  }) {
    // Listener callbacks must be awaited so callers see their effects.
    super({ _awaitHandler: true });
    this.config = fields.config;
    this.argOnStart = fields.onStart;
    this.argOnEnd = fields.onEnd;
    this.argOnError = fields.onError;
  }

  /**
   * Legacy hook invoked once per entire run tree; this tracer has no
   * persistence, so it is a no-op.
   * @param {Run} _ Not used
   */
  persistRun(_: Run): Promise<void> {
    return Promise.resolve();
  }

  /** Record the first run seen as the root and fire the start listener. */
  async onRunCreate(run: Run) {
    if (!this.rootId) {
      this.rootId = run.id;
      if (this.argOnStart) {
        await this.argOnStart(run, this.config);
      }
    }
  }

  /** Fire the end or error listener when the root run finishes. */
  async onRunUpdate(run: Run) {
    // Updates for nested child runs are ignored.
    if (run.id !== this.rootId) {
      return;
    }
    if (run.error) {
      if (this.argOnError) {
        await this.argOnError(run, this.config);
      }
      return;
    }
    if (this.argOnEnd) {
      await this.argOnEnd(run, this.config);
    }
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tracers/log_stream.ts
import {
  applyPatch,
  type Operation as JSONPatchOperation,
} from "../utils/fast-json-patch/index.js";
import { BaseTracer, type Run } from "./base.js";
import {
  BaseCallbackHandler,
  BaseCallbackHandlerInput,
  HandleLLMNewTokenCallbackFields,
} from "../callbacks/base.js";
import { IterableReadableStream } from "../utils/stream.js";
import { ChatGenerationChunk, GenerationChunk } from "../outputs.js";
import { AIMessageChunk } from "../messages/ai.js";
import type { StreamEvent, StreamEventData } from "./event_stream.js";

export type { StreamEvent, StreamEventData };

/**
 * Interface that represents the structure of a log entry in the
 * `LogStreamCallbackHandler`.
 */
export type LogEntry = {
  /** ID of the sub-run. */
  id: string;
  /** Name of the object being run. */
  name: string;
  /** Type of the object being run, eg. prompt, chain, llm, etc. */
  type: string;
  /** List of tags for the run. */
  tags: string[];
  /** Key-value pairs of metadata for the run. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  metadata: Record<string, any>;
  /** ISO-8601 timestamp of when the run started. */
  start_time: string;
  /** List of general output chunks streamed by this run. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  streamed_output: any[];
  /** List of LLM tokens streamed by this run, if applicable. */
  streamed_output_str: string[];
  /** Inputs to this run. Not available currently via streamLog. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  inputs?: any;
  /** Final output of this run. Only available after the run has finished successfully. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  final_output?: any;
  /** ISO-8601 timestamp of when the run ended. Only available after the run has finished. */
  end_time?: string;
};

export type RunState = {
  /** ID of the sub-run. */
  id: string;
  /** List of output chunks streamed by Runnable.stream() */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  streamed_output: any[];
  /** Final output of the run, usually the result of aggregating streamed_output. Only available after the run has finished successfully. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  final_output?: any;
  /**
   * List of sub-runs contained in this run, if any, in the order they were started.
   * If filters were supplied, this list will contain only the runs that matched the filters.
   */
  logs: Record<string, LogEntry>;
  /** Name of the object being run. */
  name: string;
  /** Type of the object being run, eg. prompt, chain, llm, etc. */
  type: string;
};

/**
 * List of jsonpatch JSONPatchOperations, which describe how to create the run state
 * from an empty dict. This is the minimal representation of the log, designed to
 * be serialized as JSON and sent over the wire to reconstruct the log on the other
 * side. Reconstruction of the state can be done with any jsonpatch-compliant library,
 * see https://jsonpatch.com for more information.
 */
export class RunLogPatch {
  ops: JSONPatchOperation[];

  constructor(fields: { ops?: JSONPatchOperation[] }) {
    this.ops = fields.ops ?? [];
  }

  /**
   * Combine this patch with another: the merged op list is replayed
   * against an empty document to produce a full `RunLog` snapshot.
   */
  concat(other: RunLogPatch) {
    const ops = this.ops.concat(other.ops);
    const states = applyPatch({}, ops);
    // eslint-disable-next-line @typescript-eslint/no-use-before-define
    return new RunLog({
      ops,
      state: states[states.length - 1].newDocument as RunState,
    });
  }
}

export class RunLog extends RunLogPatch {
  state: RunState;

  constructor(fields: { ops?: JSONPatchOperation[]; state: RunState }) {
    super(fields);
    this.state = fields.state;
  }

  // Unlike RunLogPatch.concat, only the *new* ops are applied, against
  // the already-materialized state.
  concat(other: RunLogPatch) {
    const ops = this.ops.concat(other.ops);
    const states = applyPatch(this.state, other.ops);
    return new RunLog({ ops, state: states[states.length - 1].newDocument });
  }

  /** Materialize a full RunLog by replaying a patch from scratch. */
  static fromRunLogPatch(patch: RunLogPatch) {
    const states = applyPatch({}, patch.ops);
    // eslint-disable-next-line @typescript-eslint/no-use-before-define
    return new RunLog({
      ops: patch.ops,
      state: states[states.length - 1].newDocument as RunState,
    });
  }
}

export type SchemaFormat = "original" | "streaming_events";

export interface LogStreamCallbackHandlerInput
  extends BaseCallbackHandlerInput {
  autoClose?: boolean;
  includeNames?: string[];
  includeTypes?: string[];
  includeTags?: string[];
  excludeNames?: string[];
  excludeTypes?: string[];
  excludeTags?: string[];
  _schemaFormat?: SchemaFormat;
}

export const isLogStreamHandler = (
  handler: BaseCallbackHandler
): handler is LogStreamCallbackHandler => handler.name === "log_stream_tracer";

/**
 * Extract standardized inputs from a run.
 *
 * Standardizes the inputs based on the type of the runnable used.
 *
 * @param run - Run object
 * @param schemaFormat - The schema format to use.
 *
 * @returns Valid inputs are only dict. By conventions, inputs always represented
 * invocation using named arguments.
 * A null means that the input is not yet known!
 */
async function _getStandardizedInputs(run: Run, schemaFormat: SchemaFormat) {
  if (schemaFormat === "original") {
    throw new Error(
      "Do not assign inputs with original schema drop the key for now. " +
        "When inputs are added to streamLog they should be added with " +
        "standardized schema for streaming events."
    );
  }
  const { inputs } = run;
  if (["retriever", "llm", "prompt"].includes(run.run_type)) {
    return inputs;
  }
  if (Object.keys(inputs).length === 1 && inputs?.input === "") {
    return undefined;
  }
  // new style chains
  // These nest an additional 'input' key inside the 'inputs' to make sure
  // the input is always a dict. We need to unpack and use the inner value.
  // We should try to fix this in Runnables and callbacks/tracers
  // Runnables should be using a null type here not a placeholder
  // dict.
  return inputs.input;
}

async function _getStandardizedOutputs(run: Run, schemaFormat: SchemaFormat) {
  const { outputs } = run;
  if (schemaFormat === "original") {
    // Return the old schema, without standardizing anything
    return outputs;
  }
  if (["retriever", "llm", "prompt"].includes(run.run_type)) {
    return outputs;
  }
  // TODO: Remove this hacky check
  if (
    outputs !== undefined &&
    Object.keys(outputs).length === 1 &&
    outputs?.output !== undefined
  ) {
    return outputs.output;
  }
  return outputs;
}

// Chat chunks are distinguished from plain generation chunks by the
// presence of a `message` property.
function isChatGenerationChunk(
  x?: ChatGenerationChunk | GenerationChunk
): x is ChatGenerationChunk {
  return x !== undefined && (x as ChatGenerationChunk).message !== undefined;
}

/**
 * Class that extends the `BaseTracer` class from the
 * `langchain.callbacks.tracers.base` module. It represents a callback
 * handler that logs the execution of runs and emits `RunLog` instances to a
 * `RunLogStream`.
 */
export class LogStreamCallbackHandler extends BaseTracer {
  protected autoClose = true;

  protected includeNames?: string[];

  protected includeTypes?: string[];

  protected includeTags?: string[];

  protected excludeNames?: string[];

  protected excludeTypes?: string[];

  protected excludeTags?: string[];

  protected _schemaFormat: SchemaFormat = "original";

  protected rootId?: string;

  // Maps a run id to its key in the RunState `logs` map (name or name:N).
  private keyMapByRunId: Record<string, string> = {};

  // Counts runs per name so repeated names get unique `name:N` keys.
  private counterMapByRunName: Record<string, number> = {};

  protected transformStream: TransformStream;

  public writer: WritableStreamDefaultWriter;

  public receiveStream: IterableReadableStream<RunLogPatch>;

  name = "log_stream_tracer";

  constructor(fields?: LogStreamCallbackHandlerInput) {
    super({ _awaitHandler: true, ...fields });
    this.autoClose = fields?.autoClose ?? true;
    this.includeNames = fields?.includeNames;
    this.includeTypes = fields?.includeTypes;
    this.includeTags = fields?.includeTags;
    this.excludeNames = fields?.excludeNames;
    this.excludeTypes = fields?.excludeTypes;
    this.excludeTags = fields?.excludeTags;
    this._schemaFormat = fields?._schemaFormat ?? this._schemaFormat;
    // Patches written via `writer` come out of `receiveStream` on the
    // consumer side.
    this.transformStream = new TransformStream();
    this.writer = this.transformStream.writable.getWriter();
    this.receiveStream = IterableReadableStream.fromReadableStream(
      this.transformStream.readable
    );
  }

  [Symbol.asyncIterator]() {
    return this.receiveStream;
  }

  protected async persistRun(_run: Run): Promise<void> {
    // This is a legacy method only called once for an entire run tree
    // and is therefore not useful here
  }

  /**
   * Decide whether a (non-root) run should appear in the log, based on
   * the include/exclude name/type/tag filters. Include filters are OR-ed
   * together; exclude filters are then AND-ed in.
   */
  _includeRun(run: Run): boolean {
    if (run.id === this.rootId) {
      return false;
    }
    const runTags = run.tags ?? [];
    let include =
      this.includeNames === undefined &&
      this.includeTags === undefined &&
      this.includeTypes === undefined;
    if (this.includeNames !== undefined) {
      include = include || this.includeNames.includes(run.name);
    }
    if (this.includeTypes !== undefined) {
      include = include || this.includeTypes.includes(run.run_type);
    }
    if (this.includeTags !== undefined) {
      include =
        include ||
        runTags.find((tag) => this.includeTags?.includes(tag)) !== undefined;
    }
    if (this.excludeNames !== undefined) {
      include = include && !this.excludeNames.includes(run.name);
    }
    if (this.excludeTypes !== undefined) {
      include = include && !this.excludeTypes.includes(run.run_type);
    }
    if (this.excludeTags !== undefined) {
      include =
        include && runTags.every((tag) => !this.excludeTags?.includes(tag));
    }
    return include;
  }

  async *tapOutputIterable<T>(
    runId: string,
    output: AsyncGenerator<T>
  ): AsyncGenerator<T> {
    // Tap an output async iterator to stream its values to the log.
    for await (const chunk of output) {
      // root run is handled in .streamLog()
      if (runId !== this.rootId) {
        // if we can't find the run silently ignore
        // eg. because this run wasn't included in the log
        const key = this.keyMapByRunId[runId];
        if (key) {
          await this.writer.write(
            new RunLogPatch({
              ops: [
                {
                  op: "add",
                  path: `/logs/${key}/streamed_output/-`,
                  value: chunk,
                },
              ],
            })
          );
        }
      }
      yield chunk;
    }
  }

  /**
   * Emit the root-state patch for the first run seen, then (for included
   * runs) register a unique log key and emit the run's initial LogEntry.
   */
  async onRunCreate(run: Run): Promise<void> {
    if (this.rootId === undefined) {
      this.rootId = run.id;
      await this.writer.write(
        new RunLogPatch({
          ops: [
            {
              op: "replace",
              path: "",
              value: {
                id: run.id,
                name: run.name,
                type: run.run_type,
                streamed_output: [],
                final_output: undefined,
                logs: {},
              },
            },
          ],
        })
      );
    }
    if (!this._includeRun(run)) {
      return;
    }
    if (this.counterMapByRunName[run.name] === undefined) {
      this.counterMapByRunName[run.name] = 0;
    }
    this.counterMapByRunName[run.name] += 1;
    const count = this.counterMapByRunName[run.name];
    // First occurrence keeps the bare name; later ones get `name:N`.
    this.keyMapByRunId[run.id] =
      count === 1 ? run.name : `${run.name}:${count}`;
    const logEntry: LogEntry = {
      id: run.id,
      name: run.name,
      type: run.run_type,
      tags: run.tags ?? [],
      metadata: run.extra?.metadata ?? {},
      start_time: new Date(run.start_time).toISOString(),
      streamed_output: [],
      streamed_output_str: [],
      final_output: undefined,
      end_time: undefined,
    };
    if (this._schemaFormat === "streaming_events") {
      logEntry.inputs = await _getStandardizedInputs(run, this._schemaFormat);
    }
    await this.writer.write(
      new RunLogPatch({
        ops: [
          {
            op: "add",
            path: `/logs/${this.keyMapByRunId[run.id]}`,
            value: logEntry,
          },
        ],
      })
    );
  }

  /**
   * Emit final output / end-time patches for a finished run. The root
   * run additionally gets a `/final_output` patch and (when `autoClose`)
   * closes the stream — done in `finally` so it happens even if patching
   * the sub-run entry throws.
   */
  async onRunUpdate(run: Run): Promise<void> {
    try {
      const runName = this.keyMapByRunId[run.id];
      if (runName === undefined) {
        return;
      }
      const ops: JSONPatchOperation[] = [];
      if (this._schemaFormat === "streaming_events") {
        ops.push({
          op: "replace",
          path: `/logs/${runName}/inputs`,
          value: await _getStandardizedInputs(run, this._schemaFormat),
        });
      }
      ops.push({
        op: "add",
        path: `/logs/${runName}/final_output`,
        value: await _getStandardizedOutputs(run, this._schemaFormat),
      });
      if (run.end_time !== undefined) {
        ops.push({
          op: "add",
          path: `/logs/${runName}/end_time`,
          value: new Date(run.end_time).toISOString(),
        });
      }
      const patch = new RunLogPatch({ ops });
      await this.writer.write(patch);
    } finally {
      if (run.id === this.rootId) {
        const patch = new RunLogPatch({
          ops: [
            {
              op: "replace",
              path: "/final_output",
              value: await _getStandardizedOutputs(run, this._schemaFormat),
            },
          ],
        });
        await this.writer.write(patch);
        if (this.autoClose) {
          await this.writer.close();
        }
      }
    }
  }

  /**
   * Stream a single LLM token into the run's log entry: the raw token
   * into `streamed_output_str`, and either the provided chunk or a
   * synthesized AIMessageChunk into `streamed_output`.
   */
  async onLLMNewToken(
    run: Run,
    token: string,
    kwargs?: HandleLLMNewTokenCallbackFields
  ): Promise<void> {
    const runName = this.keyMapByRunId[run.id];
    if (runName === undefined) {
      return;
    }
    // TODO: Remove hack
    const isChatModel = run.inputs.messages !== undefined;
    let streamedOutputValue;
    if (isChatModel) {
      if (isChatGenerationChunk(kwargs?.chunk)) {
        streamedOutputValue = kwargs?.chunk;
      } else {
        streamedOutputValue = new AIMessageChunk({
          id: `run-${run.id}`,
          content: token,
        });
      }
    } else {
      streamedOutputValue = token;
    }
    const patch = new RunLogPatch({
      ops: [
        {
          op: "add",
          path: `/logs/${runName}/streamed_output_str/-`,
          value: token,
        },
        {
          op: "add",
          path: `/logs/${runName}/streamed_output/-`,
          value: streamedOutputValue,
        },
      ],
    });
    await this.writer.write(patch);
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tracers/tracer_langchain.ts
import type { Client, LangSmithTracingClientInterface } from "langsmith";
import { RunTree } from "langsmith/run_trees";
import { getCurrentRunTree } from "langsmith/singletons/traceable";
import {
  BaseRun,
  RunCreate,
  RunUpdate as BaseRunUpdate,
  KVMap,
} from "langsmith/schemas";
import { getEnvironmentVariable, getRuntimeEnvironment } from "../utils/env.js";
import { BaseTracer } from "./base.js";
import { BaseCallbackHandlerInput } from "../callbacks/base.js";
import { getDefaultLangChainClientSingleton } from "../singletons/tracer.js";

export interface Run extends BaseRun {
  id: string;
  child_runs: this[];
  child_execution_order: number;
  dotted_order?: string;
  trace_id?: string;
}

export interface RunCreate2 extends RunCreate {
  trace_id?: string;
  dotted_order?: string;
}

export interface RunUpdate extends BaseRunUpdate {
  events: BaseRun["events"];
  inputs: KVMap;
  trace_id?: string;
  dotted_order?: string;
}

export interface LangChainTracerFields extends BaseCallbackHandlerInput {
  exampleId?: string;
  projectName?: string;
  client?: LangSmithTracingClientInterface;
}

/**
 * Tracer that streams run creates/updates to LangSmith via the langsmith
 * SDK client. Project name falls back to the LANGCHAIN_PROJECT /
 * LANGCHAIN_SESSION environment variables; the client falls back to the
 * shared singleton.
 */
export class LangChainTracer
  extends BaseTracer
  implements LangChainTracerFields
{
  name = "langchain_tracer";

  projectName?: string;

  exampleId?: string;

  client: LangSmithTracingClientInterface;

  constructor(fields: LangChainTracerFields = {}) {
    super(fields);
    const { exampleId, projectName, client } = fields;
    this.projectName =
      projectName ??
      getEnvironmentVariable("LANGCHAIN_PROJECT") ??
      getEnvironmentVariable("LANGCHAIN_SESSION");
    this.exampleId = exampleId;
    this.client = client ?? getDefaultLangChainClientSingleton();
    // If running inside a `traceable`-wrapped function, inherit its
    // run tree (client, project, runs) so traces nest correctly.
    const traceableTree = LangChainTracer.getTraceableRunTree();
    if (traceableTree) {
      this.updateFromRunTree(traceableTree);
    }
  }

  /**
   * Convert an internal Run to the SDK's RunCreate payload, attaching
   * runtime info and the reference example (root runs only).
   */
  private async _convertToCreate(
    run: Run,
    example_id: string | undefined = undefined
  ): Promise<RunCreate> {
    return {
      ...run,
      extra: {
        ...run.extra,
        runtime: await getRuntimeEnvironment(),
      },
      child_runs: undefined,
      session_name: this.projectName,
      reference_example_id: run.parent_run_id ? undefined : example_id,
    };
  }

  // Persistence happens incrementally in onRunCreate/onRunUpdate.
  protected async persistRun(_run: Run): Promise<void> {}

  async onRunCreate(run: Run): Promise<void> {
    const persistedRun: RunCreate2 = await this._convertToCreate(
      run,
      this.exampleId
    );
    await this.client.createRun(persistedRun);
  }

  async onRunUpdate(run: Run): Promise<void> {
    const runUpdate: RunUpdate = {
      end_time: run.end_time,
      error: run.error,
      outputs: run.outputs,
      events: run.events,
      inputs: run.inputs,
      trace_id: run.trace_id,
      dotted_order: run.dotted_order,
      parent_run_id: run.parent_run_id,
    };
    await this.client.updateRun(run.id, runUpdate);
  }

  getRun(id: string): Run | undefined {
    return this.runMap.get(id);
  }

  /**
   * Seed this tracer's state from an existing RunTree: walk up to the
   * root, then BFS every node into `runMap`, inheriting client/project/
   * example settings from the tree. Visited sets guard against cycles.
   */
  updateFromRunTree(runTree: RunTree) {
    let rootRun: RunTree = runTree;
    const visited = new Set<string>();
    while (rootRun.parent_run) {
      if (visited.has(rootRun.id)) break;
      visited.add(rootRun.id);
      if (!rootRun.parent_run) break;
      rootRun = rootRun.parent_run as RunTree;
    }
    visited.clear();
    const queue = [rootRun];
    while (queue.length > 0) {
      const current = queue.shift();
      if (!current || visited.has(current.id)) continue;
      visited.add(current.id);
      // @ts-expect-error Types of property 'events' are incompatible.
      this.runMap.set(current.id, current);
      if (current.child_runs) {
        queue.push(...current.child_runs);
      }
    }
    this.client = runTree.client ?? this.client;
    this.projectName = runTree.project_name ?? this.projectName;
    this.exampleId = runTree.reference_example_id ?? this.exampleId;
  }

  /**
   * Build a RunTree view of `runMap` rooted queries by `id`.
   * NOTE(review): the `for (const [id, run] of ...)` loop consts shadow
   * the `id` parameter; the final `runTreeMap[id]` lookup uses the
   * parameter (block scoping makes this correct, but it is easy to
   * misread).
   */
  convertToRunTree(id: string): RunTree | undefined {
    const runTreeMap: Record<string, RunTree> = {};
    const runTreeList: [id: string, dotted_order: string | undefined][] = [];
    for (const [id, run] of this.runMap) {
      // by converting the run map to a run tree, we are doing a copy
      // thus, any mutation performed on the run tree will not be reflected
      // back in the run map
      // TODO: Stop using `this.runMap` in favour of LangSmith's `RunTree`
      const runTree = new RunTree({
        ...run,
        child_runs: [],
        parent_run: undefined,
        // inherited properties
        client: this.client as Client,
        project_name: this.projectName,
        reference_example_id: this.exampleId,
        tracingEnabled: true,
      });
      runTreeMap[id] = runTree;
      runTreeList.push([id, run.dotted_order]);
    }
    // Sort by dotted order so parents are linked before their children.
    runTreeList.sort((a, b) => {
      if (!a[1] || !b[1]) return 0;
      return a[1].localeCompare(b[1]);
    });
    for (const [id] of runTreeList) {
      const run = this.runMap.get(id);
      const runTree = runTreeMap[id];
      if (!run || !runTree) continue;
      if (run.parent_run_id) {
        const parentRunTree = runTreeMap[run.parent_run_id];
        if (parentRunTree) {
          parentRunTree.child_runs.push(runTree);
          runTree.parent_run = parentRunTree;
        }
      }
    }
    return runTreeMap[id];
  }

  /** Current `traceable` run tree, or undefined when not inside one. */
  static getTraceableRunTree(): RunTree | undefined {
    try {
      return getCurrentRunTree();
    } catch {
      return undefined;
    }
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tracers/console.ts
import type { CSPair } from "ansi-styles";
import styles from "ansi-styles";
import { BaseTracer, type AgentRun, type Run } from "./base.js";

// Wrap text in an ANSI style's open/close escape codes.
function wrap(style: CSPair, text: string) {
  return `${style.open}${text}${style.close}`;
}

// Pretty-print as JSON; fall back to `fallback` for non-serializable
// values (e.g. circular structures).
function tryJsonStringify(obj: unknown, fallback: string) {
  try {
    return JSON.stringify(obj, null, 2);
  } catch (err) {
    return fallback;
  }
}

// Render a single key-value-map item for log output.
function formatKVMapItem(value: unknown) {
  if (typeof value === "string") {
    return value.trim();
  }
  if (value === null || value === undefined) {
    return value;
  }
  return tryJsonStringify(value, value.toString());
}

// Human-readable elapsed time ("123ms" / "1.23s"); empty string while
// the run is still in flight.
function elapsed(run: Run): string {
  if (!run.end_time) return "";
  const elapsed = run.end_time - run.start_time;
  if (elapsed < 1000) {
    return `${elapsed}ms`;
  }
  return `${(elapsed / 1000).toFixed(2)}s`;
}

const { color } = styles;

/**
 * A tracer that logs all events to the console. It extends from the
 * `BaseTracer` class and overrides its methods to provide custom logging
 * functionality.
 * @example
 * ```typescript
 *
 * const llm = new ChatAnthropic({
 *   temperature: 0,
 *   tags: ["example", "callbacks", "constructor"],
 *   callbacks: [new ConsoleCallbackHandler()],
 * });
 *
 * ```
 */
export class ConsoleCallbackHandler extends BaseTracer {
  name = "console_callback_handler" as const;

  /**
   * Method used to persist the run. In this case, it simply returns a
   * resolved promise as there's no persistence logic.
   * @param _run The run to persist.
   * @returns A resolved promise.
   */
  protected persistRun(_run: Run) {
    return Promise.resolve();
  }

  // utility methods

  /**
   * Method used to get all the parent runs of a given run.
   * @param run The run whose parents are to be retrieved.
   * @returns An array of parent runs.
   */
  getParents(run: Run) {
    const parents: Run[] = [];
    let currentRun = run;
    while (currentRun.parent_run_id) {
      const parent = this.runMap.get(currentRun.parent_run_id);
      if (parent) {
        parents.push(parent);
        currentRun = parent;
      } else {
        // Parent not in the map; stop walking rather than loop forever.
        break;
      }
    }
    return parents;
  }

  /**
   * Method used to get a string representation of the run's lineage, which
   * is used in logging.
   * @param run The run whose lineage is to be retrieved.
   * @returns A string representation of the run's lineage.
   */
  getBreadcrumbs(run: Run) {
    const parents = this.getParents(run).reverse();
    const string = [...parents, run]
      .map((parent, i, arr) => {
        const name = `${parent.execution_order}:${parent.run_type}:${parent.name}`;
        // Highlight the current (last) element in bold.
        return i === arr.length - 1 ? wrap(styles.bold, name) : name;
      })
      .join(" > ");
    return wrap(color.grey, string);
  }

  // logging methods

  /**
   * Method used to log the start of a chain run.
   * @param run The chain run that has started.
   * @returns void
   */
  onChainStart(run: Run) {
    const crumbs = this.getBreadcrumbs(run);
    console.log(
      `${wrap(
        color.green,
        "[chain/start]"
      )} [${crumbs}] Entering Chain run with input: ${tryJsonStringify(
        run.inputs,
        "[inputs]"
      )}`
    );
  }

  /**
   * Method used to log the end of a chain run.
   * @param run The chain run that has ended.
   * @returns void
   */
  onChainEnd(run: Run) {
    const crumbs = this.getBreadcrumbs(run);
    console.log(
      `${wrap(color.cyan, "[chain/end]")} [${crumbs}] [${elapsed(
        run
      )}] Exiting Chain run with output: ${tryJsonStringify(
        run.outputs,
        "[outputs]"
      )}`
    );
  }

  /**
   * Method used to log any errors of a chain run.
   * @param run The chain run that has errored.
   * @returns void
   */
  onChainError(run: Run) {
    const crumbs = this.getBreadcrumbs(run);
    console.log(
      `${wrap(color.red, "[chain/error]")} [${crumbs}] [${elapsed(
        run
      )}] Chain run errored with error: ${tryJsonStringify(
        run.error,
        "[error]"
      )}`
    );
  }

  /**
   * Method used to log the start of an LLM run.
   * @param run The LLM run that has started.
   * @returns void
   */
  onLLMStart(run: Run) {
    const crumbs = this.getBreadcrumbs(run);
    // Completion-style runs expose `prompts`; trim them for readability.
    const inputs =
      "prompts" in run.inputs
        ? { prompts: (run.inputs.prompts as string[]).map((p) => p.trim()) }
        : run.inputs;
    console.log(
      `${wrap(
        color.green,
        "[llm/start]"
      )} [${crumbs}] Entering LLM run with input: ${tryJsonStringify(
        inputs,
        "[inputs]"
      )}`
    );
  }

  /**
   * Method used to log the end of an LLM run.
   * @param run The LLM run that has ended.
   * @returns void
   */
  onLLMEnd(run: Run) {
    const crumbs = this.getBreadcrumbs(run);
    console.log(
      `${wrap(color.cyan, "[llm/end]")} [${crumbs}] [${elapsed(
        run
      )}] Exiting LLM run with output: ${tryJsonStringify(
        run.outputs,
        "[response]"
      )}`
    );
  }

  /**
   * Method used to log any errors of an LLM run.
   * @param run The LLM run that has errored.
   * @returns void
   */
  onLLMError(run: Run) {
    const crumbs = this.getBreadcrumbs(run);
    console.log(
      `${wrap(color.red, "[llm/error]")} [${crumbs}] [${elapsed(
        run
      )}] LLM run errored with error: ${tryJsonStringify(run.error, "[error]")}`
    );
  }

  /**
   * Method used to log the start of a tool run.
   * @param run The tool run that has started.
   * @returns void
   */
  onToolStart(run: Run) {
    const crumbs = this.getBreadcrumbs(run);
    console.log(
      `${wrap(
        color.green,
        "[tool/start]"
      )} [${crumbs}] Entering Tool run with input: "${formatKVMapItem(
        run.inputs.input
      )}"`
    );
  }

  /**
   * Method used to log the end of a tool run.
   * @param run The tool run that has ended.
   * @returns void
   */
  onToolEnd(run: Run) {
    const crumbs = this.getBreadcrumbs(run);
    console.log(
      `${wrap(color.cyan, "[tool/end]")} [${crumbs}] [${elapsed(
        run
      )}] Exiting Tool run with output: "${formatKVMapItem(
        run.outputs?.output
      )}"`
    );
  }

  /**
   * Method used to log any errors of a tool run.
   * @param run The tool run that has errored.
   * @returns void
   */
  onToolError(run: Run) {
    const crumbs = this.getBreadcrumbs(run);
    console.log(
      `${wrap(color.red, "[tool/error]")} [${crumbs}] [${elapsed(
        run
      )}] Tool run errored with error: ${tryJsonStringify(
        run.error,
        "[error]"
      )}`
    );
  }

  /**
   * Method used to log the start of a retriever run.
   * @param run The retriever run that has started.
   * @returns void
   */
  onRetrieverStart(run: Run) {
    const crumbs = this.getBreadcrumbs(run);
    console.log(
      `${wrap(
        color.green,
        "[retriever/start]"
      )} [${crumbs}] Entering Retriever run with input: ${tryJsonStringify(
        run.inputs,
        "[inputs]"
      )}`
    );
  }

  /**
   * Method used to log the end of a retriever run.
   * @param run The retriever run that has ended.
   * @returns void
   */
  onRetrieverEnd(run: Run) {
    const crumbs = this.getBreadcrumbs(run);
    console.log(
      `${wrap(color.cyan, "[retriever/end]")} [${crumbs}] [${elapsed(
        run
      )}] Exiting Retriever run with output: ${tryJsonStringify(
        run.outputs,
        "[outputs]"
      )}`
    );
  }

  /**
   * Method used to log any errors of a retriever run.
   * @param run The retriever run that has errored.
   * @returns void
   */
  onRetrieverError(run: Run) {
    const crumbs = this.getBreadcrumbs(run);
    console.log(
      `${wrap(color.red, "[retriever/error]")} [${crumbs}] [${elapsed(
        run
      )}] Retriever run errored with error: ${tryJsonStringify(
        run.error,
        "[error]"
      )}`
    );
  }

  /**
   * Method used to log the action selected by the agent.
   * @param run The run in which the agent action occurred.
   * @returns void
   */
  onAgentAction(run: Run) {
    const agentRun = run as AgentRun;
    const crumbs = this.getBreadcrumbs(run);
    console.log(
      `${wrap(
        color.blue,
        "[agent/action]"
      )} [${crumbs}] Agent selected action: ${tryJsonStringify(
        agentRun.actions[agentRun.actions.length - 1],
        "[action]"
      )}`
    );
  }
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tracers/initialize.ts
import { LangChainTracer } from "./tracer_langchain.js";
import { LangChainTracerV1 } from "./tracer_langchain_v1.js";

/**
 * @deprecated Use the V2 handler instead.
 *
 * Builds a `LangChainTracerV1`, loading the named session when one is
 * given and the default session otherwise.
 * @param session Optional session to load into the tracer.
 * @returns An instance of `LangChainTracerV1`.
 */
export async function getTracingCallbackHandler(
  session?: string
): Promise<LangChainTracerV1> {
  const handler = new LangChainTracerV1();
  if (!session) {
    await handler.loadDefaultSession();
  } else {
    await handler.loadSession(session);
  }
  return handler;
}

/**
 * @deprecated Instantiate directly using the LangChainTracer constructor.
 *
 * Builds a `LangChainTracer` with no session data loaded.
 * @returns An instance of `LangChainTracer`.
 */
export async function getTracingV2CallbackHandler(): Promise<LangChainTracer> {
  return new LangChainTracer();
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tracers/base.ts
import { KVMap, BaseRun } from "langsmith/schemas";
import type { ChainValues } from "../utils/types/index.js";
import type { AgentAction, AgentFinish } from "../agents.js";
import type { LLMResult } from "../outputs.js";
import type { BaseMessage } from "../messages/base.js";
import { Serialized } from "../load/serializable.js";
import {
  BaseCallbackHandler,
  BaseCallbackHandlerInput,
  HandleLLMNewTokenCallbackFields,
  NewTokenIndices,
} from "../callbacks/base.js";
import type { Document } from "../documents/document.js";

export type RunType = string;

/**
 * A traced run as tracked locally by a tracer. Extends the LangSmith SDK
 * `BaseRun` with fields this module always populates plus local-only
 * bookkeeping (`child_runs`, `child_execution_order`, `events`).
 */
export interface Run extends BaseRun {
  // some optional fields are always present here
  id: string;
  start_time: number;
  execution_order: number;
  // some additional fields that don't exist in sdk runs
  child_runs: this[];
  child_execution_order: number;
  // Chronological log of lifecycle events (start/end/error/new_token/...).
  events: Array<{
    name: string;
    time: string;
    kwargs?: Record<string, unknown>;
  }>;
  trace_id?: string;
  dotted_order?: string;
}

/** A chain run that additionally accumulates the agent actions taken within it. */
export interface AgentRun extends Run {
  actions: AgentAction[];
}

// Wrap non-object values (and arrays) in `{ [defaultKey]: value }` so run
// inputs/outputs are always plain dictionaries.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function _coerceToDict(value: any, defaultKey: string) {
  return value && !Array.isArray(value) && typeof value === "object"
    ? value
    : { [defaultKey]: value };
}

// Remove the separators an ISO timestamp contains so it can be embedded in a
// dotted-order segment.
function stripNonAlphanumeric(input: string) {
  return input.replace(/[-:.]/g, "");
}

// Build one dotted-order segment: compact ISO start time + 3-digit execution
// order + run id. Segments for nested runs are later joined with ".".
function convertToDottedOrderFormat(
  epoch: number,
  runId: string,
  executionOrder: number
) {
  const paddedOrder = executionOrder.toFixed(0).slice(0, 3).padStart(3, "0");
  return (
    stripNonAlphanumeric(
      `${new Date(epoch).toISOString().slice(0, -1)}${paddedOrder}Z`
    ) + runId
  );
}

/** Duck-type check for tracer handlers via the `_addRunToRunMap` method. */
export function isBaseTracer(x: BaseCallbackHandler): x is BaseTracer {
  return typeof (x as BaseTracer)._addRunToRunMap === "function";
}

/**
 * Base class for callback handlers that assemble callback events into `Run`
 * trees. Runs are kept in `runMap` while in flight; when a root run ends it
 * is handed to the subclass via the abstract `persistRun`. Subclasses may
 * also hook individual lifecycle events through the optional `on*` methods
 * declared at the bottom of the class.
 */
export abstract class BaseTracer extends BaseCallbackHandler {
  // In-flight runs keyed by run id; entries are removed in `_endTrace`.
  protected runMap: Map<string, Run> = new Map();

  constructor(_fields?: BaseCallbackHandlerInput) {
    super(...arguments);
  }

  // Tracers are stateful, so "copying" returns the same instance.
  copy(): this {
    return this;
  }

  /** Normalize an arbitrary thrown value into a string (message + stack for Errors). */
  protected stringifyError(error: unknown) {
    // eslint-disable-next-line no-instanceof/no-instanceof
    if (error instanceof Error) {
      return error.message + (error?.stack ? `\n\n${error.stack}` : "");
    }
    if (typeof error === "string") {
      return error;
    }
    return `${error}`;
  }

  /** Persist a completed root run; implemented by concrete tracers. */
  protected abstract persistRun(run: Run): Promise<void>;

  protected _addChildRun(parentRun: Run, childRun: Run) {
    parentRun.child_runs.push(childRun);
  }

  /**
   * Register a (shallow copy of a) run in the run map, linking it to its
   * parent when one is present and computing `trace_id` / `dotted_order`.
   * @returns The stored copy of the run.
   */
  _addRunToRunMap(run: Run) {
    const currentDottedOrder = convertToDottedOrderFormat(
      run.start_time,
      run.id,
      run.execution_order
    );
    const storedRun = { ...run };
    if (storedRun.parent_run_id !== undefined) {
      const parentRun = this.runMap.get(storedRun.parent_run_id);
      if (parentRun) {
        this._addChildRun(parentRun, storedRun);
        parentRun.child_execution_order = Math.max(
          parentRun.child_execution_order,
          storedRun.child_execution_order
        );
        storedRun.trace_id = parentRun.trace_id;
        if (parentRun.dotted_order !== undefined) {
          storedRun.dotted_order = [
            parentRun.dotted_order,
            currentDottedOrder,
          ].join(".");
        } else {
          // This can happen naturally for callbacks added within a run
          // console.debug(`Parent run with UUID ${storedRun.parent_run_id} has no dotted order.`);
        }
      } else {
        // This can happen naturally for callbacks added within a run
        // console.debug(
        //   `Parent run with UUID ${storedRun.parent_run_id} not found.`
        // );
      }
    } else {
      // Root run: it is its own trace and its segment is the full dotted order.
      storedRun.trace_id = storedRun.id;
      storedRun.dotted_order = currentDottedOrder;
    }
    this.runMap.set(storedRun.id, storedRun);
    return storedRun;
  }

  /**
   * Finish a run: propagate its execution order to the parent, persist it if
   * it is a root run, drop it from the run map, and notify `onRunUpdate`.
   */
  protected async _endTrace(run: Run): Promise<void> {
    const parentRun =
      run.parent_run_id !== undefined && this.runMap.get(run.parent_run_id);
    if (parentRun) {
      parentRun.child_execution_order = Math.max(
        parentRun.child_execution_order,
        run.child_execution_order
      );
    } else {
      await this.persistRun(run);
    }
    this.runMap.delete(run.id);
    await this.onRunUpdate?.(run);
  }

  /** Next execution order for a child of the given parent (1 for roots). */
  protected _getExecutionOrder(parentRunId: string | undefined): number {
    const parentRun = parentRunId !== undefined && this.runMap.get(parentRunId);
    // If a run has no parent then execution order is 1
    if (!parentRun) {
      return 1;
    }
    return parentRun.child_execution_order + 1;
  }

  /**
   * Create and add a run to the run map for LLM start events.
   * This must sometimes be done synchronously to avoid race conditions
   * when callbacks are backgrounded, so we expose it as a separate method here.
   */
  _createRunForLLMStart(
    llm: Serialized,
    prompts: string[],
    runId: string,
    parentRunId?: string,
    extraParams?: KVMap,
    tags?: string[],
    metadata?: KVMap,
    name?: string
  ) {
    const execution_order = this._getExecutionOrder(parentRunId);
    const start_time = Date.now();
    const finalExtraParams = metadata
      ? { ...extraParams, metadata }
      : extraParams;
    const run: Run = {
      id: runId,
      name: name ?? llm.id[llm.id.length - 1],
      parent_run_id: parentRunId,
      start_time,
      serialized: llm,
      events: [
        {
          name: "start",
          time: new Date(start_time).toISOString(),
        },
      ],
      inputs: { prompts },
      execution_order,
      child_runs: [],
      child_execution_order: execution_order,
      run_type: "llm",
      extra: finalExtraParams ?? {},
      tags: tags || [],
    };
    return this._addRunToRunMap(run);
  }

  /** Handle an LLM start callback: create (or reuse) the run and fire hooks. */
  async handleLLMStart(
    llm: Serialized,
    prompts: string[],
    runId: string,
    parentRunId?: string,
    extraParams?: KVMap,
    tags?: string[],
    metadata?: KVMap,
    name?: string
  ): Promise<Run> {
    const run =
      this.runMap.get(runId) ??
      this._createRunForLLMStart(
        llm,
        prompts,
        runId,
        parentRunId,
        extraParams,
        tags,
        metadata,
        name
      );
    await this.onRunCreate?.(run);
    await this.onLLMStart?.(run);
    return run;
  }

  /**
   * Create and add a run to the run map for chat model start events.
   * This must sometimes be done synchronously to avoid race conditions
   * when callbacks are backgrounded, so we expose it as a separate method here.
   */
  _createRunForChatModelStart(
    llm: Serialized,
    messages: BaseMessage[][],
    runId: string,
    parentRunId?: string,
    extraParams?: KVMap,
    tags?: string[],
    metadata?: KVMap,
    name?: string
  ) {
    const execution_order = this._getExecutionOrder(parentRunId);
    const start_time = Date.now();
    const finalExtraParams = metadata
      ? { ...extraParams, metadata }
      : extraParams;
    const run: Run = {
      id: runId,
      name: name ?? llm.id[llm.id.length - 1],
      parent_run_id: parentRunId,
      start_time,
      serialized: llm,
      events: [
        {
          name: "start",
          time: new Date(start_time).toISOString(),
        },
      ],
      inputs: { messages },
      execution_order,
      child_runs: [],
      child_execution_order: execution_order,
      // Chat models are traced as "llm" runs as well.
      run_type: "llm",
      extra: finalExtraParams ?? {},
      tags: tags || [],
    };
    return this._addRunToRunMap(run);
  }

  /** Handle a chat model start callback: create (or reuse) the run and fire hooks. */
  async handleChatModelStart(
    llm: Serialized,
    messages: BaseMessage[][],
    runId: string,
    parentRunId?: string,
    extraParams?: KVMap,
    tags?: string[],
    metadata?: KVMap,
    name?: string
  ): Promise<Run> {
    const run =
      this.runMap.get(runId) ??
      this._createRunForChatModelStart(
        llm,
        messages,
        runId,
        parentRunId,
        extraParams,
        tags,
        metadata,
        name
      );
    await this.onRunCreate?.(run);
    await this.onLLMStart?.(run);
    return run;
  }

  /** Record outputs + end time for an LLM run and finish its trace. */
  async handleLLMEnd(output: LLMResult, runId: string): Promise<Run> {
    const run = this.runMap.get(runId);
    if (!run || run?.run_type !== "llm") {
      throw new Error("No LLM run to end.");
    }
    run.end_time = Date.now();
    run.outputs = output;
    run.events.push({
      name: "end",
      time: new Date(run.end_time).toISOString(),
    });
    await this.onLLMEnd?.(run);
    await this._endTrace(run);
    return run;
  }

  /** Record an error + end time for an LLM run and finish its trace. */
  async handleLLMError(error: unknown, runId: string): Promise<Run> {
    const run = this.runMap.get(runId);
    if (!run || run?.run_type !== "llm") {
      throw new Error("No LLM run to end.");
    }
    run.end_time = Date.now();
    run.error = this.stringifyError(error);
    run.events.push({
      name: "error",
      time: new Date(run.end_time).toISOString(),
    });
    await this.onLLMError?.(run);
    await this._endTrace(run);
    return run;
  }

  /**
   * Create and add a run to the run map for chain start events.
   * This must sometimes be done synchronously to avoid race conditions
   * when callbacks are backgrounded, so we expose it as a separate method here.
   */
  _createRunForChainStart(
    chain: Serialized,
    inputs: ChainValues,
    runId: string,
    parentRunId?: string,
    tags?: string[],
    metadata?: KVMap,
    runType?: string,
    name?: string
  ) {
    const execution_order = this._getExecutionOrder(parentRunId);
    const start_time = Date.now();
    const run: Run = {
      id: runId,
      name: name ?? chain.id[chain.id.length - 1],
      parent_run_id: parentRunId,
      start_time,
      serialized: chain,
      events: [
        {
          name: "start",
          time: new Date(start_time).toISOString(),
        },
      ],
      inputs,
      execution_order,
      child_execution_order: execution_order,
      run_type: runType ?? "chain",
      child_runs: [],
      extra: metadata ? { metadata } : {},
      tags: tags || [],
    };
    return this._addRunToRunMap(run);
  }

  /** Handle a chain start callback: create (or reuse) the run and fire hooks. */
  async handleChainStart(
    chain: Serialized,
    inputs: ChainValues,
    runId: string,
    parentRunId?: string,
    tags?: string[],
    metadata?: KVMap,
    runType?: string,
    name?: string
  ): Promise<Run> {
    const run =
      this.runMap.get(runId) ??
      this._createRunForChainStart(
        chain,
        inputs,
        runId,
        parentRunId,
        tags,
        metadata,
        runType,
        name
      );
    await this.onRunCreate?.(run);
    await this.onChainStart?.(run);
    return run;
  }

  /**
   * Record outputs + end time for a chain run and finish its trace.
   * `kwargs.inputs`, when given, replaces the run's recorded inputs.
   */
  async handleChainEnd(
    outputs: ChainValues,
    runId: string,
    _parentRunId?: string,
    _tags?: string[],
    kwargs?: { inputs?: Record<string, unknown> }
  ): Promise<Run> {
    const run = this.runMap.get(runId);
    if (!run) {
      throw new Error("No chain run to end.");
    }
    run.end_time = Date.now();
    run.outputs = _coerceToDict(outputs, "output");
    run.events.push({
      name: "end",
      time: new Date(run.end_time).toISOString(),
    });
    if (kwargs?.inputs !== undefined) {
      run.inputs = _coerceToDict(kwargs.inputs, "input");
    }
    await this.onChainEnd?.(run);
    await this._endTrace(run);
    return run;
  }

  /**
   * Record an error + end time for a chain run and finish its trace.
   * `kwargs.inputs`, when given, replaces the run's recorded inputs.
   */
  async handleChainError(
    error: unknown,
    runId: string,
    _parentRunId?: string,
    _tags?: string[],
    kwargs?: { inputs?: Record<string, unknown> }
  ): Promise<Run> {
    const run = this.runMap.get(runId);
    if (!run) {
      throw new Error("No chain run to end.");
    }
    run.end_time = Date.now();
    run.error = this.stringifyError(error);
    run.events.push({
      name: "error",
      time: new Date(run.end_time).toISOString(),
    });
    if (kwargs?.inputs !== undefined) {
      run.inputs = _coerceToDict(kwargs.inputs, "input");
    }
    await this.onChainError?.(run);
    await this._endTrace(run);
    return run;
  }

  /**
   * Create and add a run to the run map for tool start events.
   * This must sometimes be done synchronously to avoid race conditions
   * when callbacks are backgrounded, so we expose it as a separate method here.
   */
  _createRunForToolStart(
    tool: Serialized,
    input: string,
    runId: string,
    parentRunId?: string,
    tags?: string[],
    metadata?: KVMap,
    name?: string
  ) {
    const execution_order = this._getExecutionOrder(parentRunId);
    const start_time = Date.now();
    const run: Run = {
      id: runId,
      name: name ?? tool.id[tool.id.length - 1],
      parent_run_id: parentRunId,
      start_time,
      serialized: tool,
      events: [
        {
          name: "start",
          time: new Date(start_time).toISOString(),
        },
      ],
      inputs: { input },
      execution_order,
      child_execution_order: execution_order,
      run_type: "tool",
      child_runs: [],
      extra: metadata ? { metadata } : {},
      tags: tags || [],
    };
    return this._addRunToRunMap(run);
  }

  /** Handle a tool start callback: create (or reuse) the run and fire hooks. */
  async handleToolStart(
    tool: Serialized,
    input: string,
    runId: string,
    parentRunId?: string,
    tags?: string[],
    metadata?: KVMap,
    name?: string
  ): Promise<Run> {
    const run =
      this.runMap.get(runId) ??
      this._createRunForToolStart(
        tool,
        input,
        runId,
        parentRunId,
        tags,
        metadata,
        name
      );
    await this.onRunCreate?.(run);
    await this.onToolStart?.(run);
    return run;
  }

  /** Record output + end time for a tool run and finish its trace. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  async handleToolEnd(output: any, runId: string): Promise<Run> {
    const run = this.runMap.get(runId);
    if (!run || run?.run_type !== "tool") {
      throw new Error("No tool run to end");
    }
    run.end_time = Date.now();
    run.outputs = { output };
    run.events.push({
      name: "end",
      time: new Date(run.end_time).toISOString(),
    });
    await this.onToolEnd?.(run);
    await this._endTrace(run);
    return run;
  }

  /** Record an error + end time for a tool run and finish its trace. */
  async handleToolError(error: unknown, runId: string): Promise<Run> {
    const run = this.runMap.get(runId);
    if (!run || run?.run_type !== "tool") {
      throw new Error("No tool run to end");
    }
    run.end_time = Date.now();
    run.error = this.stringifyError(error);
    run.events.push({
      name: "error",
      time: new Date(run.end_time).toISOString(),
    });
    await this.onToolError?.(run);
    await this._endTrace(run);
    return run;
  }

  /**
   * Append an agent action to the enclosing chain run.
   * Silently ignored when the run is missing or not a chain run.
   */
  async handleAgentAction(action: AgentAction, runId: string): Promise<void> {
    const run = this.runMap.get(runId);
    if (!run || run?.run_type !== "chain") {
      return;
    }
    const agentRun = run as AgentRun;
    agentRun.actions = agentRun.actions || [];
    agentRun.actions.push(action);
    agentRun.events.push({
      name: "agent_action",
      time: new Date().toISOString(),
      kwargs: { action },
    });
    await this.onAgentAction?.(run as AgentRun);
  }

  /**
   * Record the agent's final decision on the enclosing chain run.
   * Silently ignored when the run is missing or not a chain run.
   */
  async handleAgentEnd(action: AgentFinish, runId: string): Promise<void> {
    const run = this.runMap.get(runId);
    if (!run || run?.run_type !== "chain") {
      return;
    }
    run.events.push({
      name: "agent_end",
      time: new Date().toISOString(),
      kwargs: { action },
    });
    await this.onAgentEnd?.(run);
  }

  /**
   * Create and add a run to the run map for retriever start events.
   * This must sometimes be done synchronously to avoid race conditions
   * when callbacks are backgrounded, so we expose it as a separate method here.
   */
  _createRunForRetrieverStart(
    retriever: Serialized,
    query: string,
    runId: string,
    parentRunId?: string,
    tags?: string[],
    metadata?: KVMap,
    name?: string
  ) {
    const execution_order = this._getExecutionOrder(parentRunId);
    const start_time = Date.now();
    const run: Run = {
      id: runId,
      name: name ?? retriever.id[retriever.id.length - 1],
      parent_run_id: parentRunId,
      start_time,
      serialized: retriever,
      events: [
        {
          name: "start",
          time: new Date(start_time).toISOString(),
        },
      ],
      inputs: { query },
      execution_order,
      child_execution_order: execution_order,
      run_type: "retriever",
      child_runs: [],
      extra: metadata ? { metadata } : {},
      tags: tags || [],
    };
    return this._addRunToRunMap(run);
  }

  /** Handle a retriever start callback: create (or reuse) the run and fire hooks. */
  async handleRetrieverStart(
    retriever: Serialized,
    query: string,
    runId: string,
    parentRunId?: string,
    tags?: string[],
    metadata?: KVMap,
    name?: string
  ): Promise<Run> {
    const run =
      this.runMap.get(runId) ??
      this._createRunForRetrieverStart(
        retriever,
        query,
        runId,
        parentRunId,
        tags,
        metadata,
        name
      );
    await this.onRunCreate?.(run);
    await this.onRetrieverStart?.(run);
    return run;
  }

  /** Record retrieved documents + end time for a retriever run and finish its trace. */
  async handleRetrieverEnd(
    documents: Document<Record<string, unknown>>[],
    runId: string
  ): Promise<Run> {
    const run = this.runMap.get(runId);
    if (!run || run?.run_type !== "retriever") {
      throw new Error("No retriever run to end");
    }
    run.end_time = Date.now();
    run.outputs = { documents };
    run.events.push({
      name: "end",
      time: new Date(run.end_time).toISOString(),
    });
    await this.onRetrieverEnd?.(run);
    await this._endTrace(run);
    return run;
  }

  /** Record an error + end time for a retriever run and finish its trace. */
  async handleRetrieverError(error: unknown, runId: string): Promise<Run> {
    const run = this.runMap.get(runId);
    if (!run || run?.run_type !== "retriever") {
      throw new Error("No retriever run to end");
    }
    run.end_time = Date.now();
    run.error = this.stringifyError(error);
    run.events.push({
      name: "error",
      time: new Date(run.end_time).toISOString(),
    });
    await this.onRetrieverError?.(run);
    await this._endTrace(run);
    return run;
  }

  /**
   * Append a free-form text event to the enclosing chain run.
   * Silently ignored when the run is missing or not a chain run.
   */
  async handleText(text: string, runId: string): Promise<void> {
    const run = this.runMap.get(runId);
    if (!run || run?.run_type !== "chain") {
      return;
    }
    run.events.push({
      name: "text",
      time: new Date().toISOString(),
      kwargs: { text },
    });
    await this.onText?.(run);
  }

  /** Append a streamed token event to an in-flight LLM run. */
  async handleLLMNewToken(
    token: string,
    idx: NewTokenIndices,
    runId: string,
    _parentRunId?: string,
    _tags?: string[],
    fields?: HandleLLMNewTokenCallbackFields
  ): Promise<Run> {
    const run = this.runMap.get(runId);
    if (!run || run?.run_type !== "llm") {
      throw new Error(
        `Invalid "runId" provided to "handleLLMNewToken" callback.`
      );
    }
    run.events.push({
      name: "new_token",
      time: new Date().toISOString(),
      kwargs: { token, idx, chunk: fields?.chunk },
    });
    await this.onLLMNewToken?.(run, token, { chunk: fields?.chunk });
    return run;
  }

  // custom event handlers
  onRunCreate?(run: Run): void | Promise<void>;

  onRunUpdate?(run: Run): void | Promise<void>;

  onLLMStart?(run: Run): void | Promise<void>;

  onLLMEnd?(run: Run): void | Promise<void>;

  onLLMError?(run: Run): void | Promise<void>;

  onChainStart?(run: Run): void | Promise<void>;

  onChainEnd?(run: Run): void | Promise<void>;

  onChainError?(run: Run): void | Promise<void>;

  onToolStart?(run: Run): void | Promise<void>;

  onToolEnd?(run: Run): void | Promise<void>;

  onToolError?(run: Run): void | Promise<void>;

  onAgentAction?(run: Run): void | Promise<void>;

  onAgentEnd?(run: Run): void | Promise<void>;

  onRetrieverStart?(run: Run): void | Promise<void>;

  onRetrieverEnd?(run: Run): void | Promise<void>;

  onRetrieverError?(run: Run): void | Promise<void>;

  onText?(run: Run): void | Promise<void>;

  onLLMNewToken?(
    run: Run,
    token: string,
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    kwargs?: { chunk: any }
  ): void | Promise<void>;
}
0
lc_public_repos/langchainjs/langchain-core/src
lc_public_repos/langchainjs/langchain-core/src/tracers/run_collector.ts
import { BaseRun, Run } from "langsmith/schemas";
import { BaseTracer } from "./base.js";

/**
 * A callback handler that collects traced runs and makes it easy to fetch the traced run object from calls through any langchain object.
 * For instance, it makes it easy to fetch the run ID and then do things with that, such as log feedback.
 */
export class RunCollectorCallbackHandler extends BaseTracer {
  /** The name of the callback handler. */
  name = "run_collector";

  /** The ID of the example. */
  exampleId?: string;

  /** An array of traced runs. */
  tracedRuns: Run[];

  /**
   * Creates a new instance of the RunCollectorCallbackHandler class.
   * @param fields.exampleId The ID of the example.
   */
  constructor(fields: { exampleId?: string } = {}) {
    super({ _awaitHandler: true });
    this.exampleId = fields.exampleId;
    this.tracedRuns = [];
  }

  /**
   * Persists the given run object.
   * @param run The run object to persist.
   */
  protected async persistRun(run: BaseRun): Promise<void> {
    // Attach the configured example id before recording the completed run.
    const collected = { ...run, reference_example_id: this.exampleId } as Run;
    this.tracedRuns.push(collected);
  }
}