Skip to content

LangChain Browser Integration Guide

This guide provides detailed patterns and best practices for integrating LangChain functionality within browser extension environments, focusing on memory management, tool integration, and browser-specific optimizations.

LangChain components work differently in browser environments due to security restrictions and performance considerations:

// Browser-optimized LangChain setup. The API key is pulled from extension
// storage at runtime rather than being bundled into the extension code.
const browserLLM = new ChatOpenAI({
  openAIApiKey: await getSecureApiKey(), // retrieved from extension storage
  temperature: 0.7,
  maxTokens: 2048, // cap response size for browser performance
  streaming: true, // progressive tokens give a more responsive UX
});

// Browser-compatible conversation memory, persisted via extension storage.
const browserMemory = new BufferWindowMemory({
  returnMessages: true,
  memoryKey: "chat_history",
  k: 10, // sliding window keeps memory usage bounded
  storage: new BrowserExtensionStorage(), // survives popup/page reloads
});

Implement persistent memory using browser extension storage APIs:

// Promise-based wrapper around chrome.storage.local so LangChain memory
// classes can persist data in the extension's storage area.
//
// Fix over the original: chrome.storage callbacks never throw — failures
// (quota exceeded, serialization errors) are reported only through
// chrome.runtime.lastError, which the original ignored, silently losing
// data. Each wrapper now rejects on such errors. load() also uses `??`
// so legitimately-stored falsy values (0, "", false) are returned intact.
class BrowserExtensionStorage {
  /**
   * Persist `data` under `key`.
   * @param {string} key
   * @param {*} data - must be serializable by chrome.storage
   * @returns {Promise<void>}
   */
  async save(key, data) {
    return new Promise((resolve, reject) => {
      chrome.storage.local.set({ [key]: data }, () => {
        const err = chrome.runtime.lastError;
        if (err) {
          reject(new Error(`storage save failed for "${key}": ${err.message}`));
        } else {
          resolve();
        }
      });
    });
  }

  /**
   * Load the value stored under `key`.
   * @param {string} key
   * @returns {Promise<*>} the stored value, or null when absent
   */
  async load(key) {
    return new Promise((resolve, reject) => {
      chrome.storage.local.get([key], (result) => {
        const err = chrome.runtime.lastError;
        if (err) {
          reject(new Error(`storage load failed for "${key}": ${err.message}`));
        } else {
          resolve(result[key] ?? null);
        }
      });
    });
  }

  /**
   * Remove the entry stored under `key`.
   * @param {string} key
   * @returns {Promise<void>}
   */
  async clear(key) {
    return new Promise((resolve, reject) => {
      chrome.storage.local.remove([key], () => {
        const err = chrome.runtime.lastError;
        if (err) {
          reject(new Error(`storage clear failed for "${key}": ${err.message}`));
        } else {
          resolve();
        }
      });
    });
  }
}
// Usage in LangChain workflows: a summary-buffer memory whose backing
// store is the extension's local storage.
const persistentMemory = new ConversationSummaryBufferMemory({
  storageKey: "conversation_memory",
  storage: new BrowserExtensionStorage(),
  maxTokenLimit: 1000, // summarize once history exceeds this token budget
  llm: browserLLM, // model used to produce the running summary
});

Maintain AI context across different web pages:

// Page-aware memory system: keeps a global interaction log plus a
// per-page log keyed by a hash of the page URL.
//
// Fix over the original: getCurrentPageUrl, hashUrl, getPageMemory and
// formatMemoryForLLM were called but never defined, so every
// saveContext / loadMemoryVariables call threw a TypeError. Minimal
// implementations are provided below.
class PageAwareMemory extends BaseMemory {
  constructor(options) {
    super();
    this.storage = options.storage; // BrowserExtensionStorage-compatible object
    this.currentUrl = null; // last URL observed via getCurrentPageUrl()
    this.globalMemory = new Map();
    this.pageSpecificMemory = new Map();
  }

  /** Resolve the URL of the active tab (empty string when unavailable). */
  async getCurrentPageUrl() {
    return new Promise((resolve) => {
      chrome.tabs.query({ active: true, currentWindow: true }, (tabs) => {
        this.currentUrl = tabs?.[0]?.url ?? "";
        resolve(this.currentUrl);
      });
    });
  }

  /** Deterministic, storage-safe key fragment for a URL (djb2-xor hash). */
  hashUrl(url) {
    let hash = 5381;
    for (const ch of String(url)) {
      hash = (Math.imul(hash, 33) ^ ch.codePointAt(0)) >>> 0;
    }
    return hash.toString(36);
  }

  /** Interactions previously saved for this page (empty array when none). */
  async getPageMemory(url) {
    const saved = await this.storage.load(`page_memory_${this.hashUrl(url)}`);
    return saved?.interactions ?? [];
  }

  /** Render global + page context into a plain-text history for the LLM. */
  formatMemoryForLLM(globalContext, pageContext) {
    const lines = [];
    if (globalContext) {
      lines.push(
        `Last interaction (${globalContext.url}): ` +
        `${JSON.stringify(globalContext.input)} -> ${JSON.stringify(globalContext.output)}`
      );
    }
    for (const item of pageContext) {
      lines.push(`${JSON.stringify(item.input)} -> ${JSON.stringify(item.output)}`);
    }
    return lines.join("\n");
  }

  async saveContext(inputs, outputs) {
    const url = await this.getCurrentPageUrl();
    // Save the single most recent interaction to global memory.
    await this.storage.save('global_memory', {
      timestamp: Date.now(),
      url: url,
      input: inputs,
      output: outputs
    });
    // Append to the page-specific interaction log.
    const pageKey = `page_memory_${this.hashUrl(url)}`;
    await this.storage.save(pageKey, {
      interactions: [...(await this.getPageMemory(url)), {
        timestamp: Date.now(),
        input: inputs,
        output: outputs
      }]
    });
  }

  async loadMemoryVariables(inputs) {
    const url = await this.getCurrentPageUrl();
    const globalContext = await this.storage.load('global_memory');
    const pageContext = await this.getPageMemory(url);
    return {
      history: this.formatMemoryForLLM(globalContext, pageContext),
      page_context: url,
      previous_interactions: pageContext.length
    };
  }
}

Memory Optimization for Browser Performance

Section titled “Memory Optimization for Browser Performance”

Implement memory management that respects browser resource constraints:

// Memory-efficient conversation buffer tuned for browser constraints.
//
// Fix over the original: prune() invoked the summarizer even when there
// were no old messages to compress (messages.slice(0, -5) is empty for
// short histories), wasting an LLM round-trip and prepending a summary
// of nothing. It now returns early in that case. maxTokenLimit is also
// assigned explicitly so prune() does not depend on the parent class
// mirroring constructor options onto the instance.
class BrowserOptimizedMemory extends ConversationBufferMemory {
  constructor(options) {
    super({
      ...options,
      maxTokenLimit: 1500, // browser-appropriate limit
      pruneStrategy: 'sliding_window'
    });
    this.maxTokenLimit = 1500; // mirrored locally; prune() reads this
    // Start compressing once memory reaches 80% of the token budget.
    this.compressionThreshold = 0.8;
  }

  /**
   * Compress older messages into a single summary message once the
   * estimated token count crosses the threshold. The 5 most recent
   * messages are always kept verbatim.
   */
  async prune() {
    // NOTE(review): estimateTokenCount is not defined in this snippet —
    // presumably inherited from ConversationBufferMemory; confirm.
    const memorySize = await this.estimateTokenCount();
    const maxSize = this.maxTokenLimit * this.compressionThreshold;
    if (memorySize <= maxSize) {
      return; // under budget, nothing to do
    }
    const oldMemories = this.chatMemory.messages.slice(0, -5);
    if (oldMemories.length === 0) {
      return; // history too short to have anything worth compressing
    }
    // Compress older memories using summarization.
    const summary = await this.llm.call([
      new SystemMessage("Summarize the following conversation history concisely:"),
      ...oldMemories
    ]);
    // Replace old memories with the summary, keeping the recent tail.
    this.chatMemory.messages = [
      new SystemMessage(`Previous conversation summary: ${summary.content}`),
      ...this.chatMemory.messages.slice(-5)
    ];
  }
}

Create LangChain tools that leverage browser extension capabilities:

// Text extraction tool.
//
// Fix over the original: the "element" branch called this.getElementText,
// which was never defined, so element extraction always threw a
// TypeError. An implementation is provided, with the selector
// JSON-encoded before injection so quotes cannot break the script.
//
// NOTE(review): chrome.tabs.executeScript is a Manifest V2 API; MV3
// extensions must use chrome.scripting.executeScript — confirm the
// target manifest version.
class BrowserTextExtractionTool extends Tool {
  name = "browser_text_extractor";
  description = "Extract text content from the current web page or user selection";

  /**
   * @param {string} input - JSON string: { type: "selected"|"full_page"|"element", selector? }
   * @returns {Promise<string>} extracted text ("" when nothing found)
   * @throws {Error} on an unknown extraction type
   */
  async _call(input) {
    const { type, selector } = JSON.parse(input);
    switch (type) {
      case "selected":
        return await this.getSelectedText();
      case "full_page":
        return await this.getAllText();
      case "element":
        return await this.getElementText(selector);
      default:
        throw new Error(`Unknown extraction type: ${type}`);
    }
  }

  /** Text currently highlighted by the user in the active tab. */
  async getSelectedText() {
    return new Promise((resolve) => {
      chrome.tabs.executeScript({
        code: 'window.getSelection().toString()'
      }, (result) => resolve(result[0] || ""));
    });
  }

  /** Full rendered text of the page body. */
  async getAllText() {
    return new Promise((resolve) => {
      chrome.tabs.executeScript({
        code: 'document.body.innerText'
      }, (result) => resolve(result[0] || ""));
    });
  }

  /** Rendered text of the first element matching `selector`. */
  async getElementText(selector) {
    return new Promise((resolve) => {
      chrome.tabs.executeScript({
        // JSON.stringify makes the selector a safe string literal in the
        // injected source.
        code: `(document.querySelector(${JSON.stringify(String(selector))}) || {}).innerText || ""`
      }, (result) => resolve(result[0] || ""));
    });
  }
}
// HTML analysis tool.
//
// Fix over the original: the default branch of generateExtractionCode
// spliced `selector` and `attribute` straight into the injected source,
// so a quote in either value produced a syntax error — or arbitrary code
// injection into the page context. Both values are now sanitized before
// being embedded.
class BrowserHTMLAnalysisTool extends Tool {
  name = "html_analyzer";
  description = "Analyze HTML structure and extract specific elements from web pages";

  /**
   * @param {string} input - JSON string: { action, selector?, attribute? }
   * @returns {Promise<string>} JSON-encoded extraction result
   */
  async _call(input) {
    const { action, selector, attribute } = JSON.parse(input);
    return new Promise((resolve) => {
      const code = this.generateExtractionCode(action, selector, attribute);
      chrome.tabs.executeScript({ code }, (result) => {
        resolve(JSON.stringify(result[0] || {}));
      });
    });
  }

  /** Build the content-script snippet for the requested extraction. */
  generateExtractionCode(action, selector, attribute) {
    switch (action) {
      case "get_links":
        return `Array.from(document.querySelectorAll('a')).map(a => ({
          href: a.href,
          text: a.textContent.trim(),
          title: a.title
        }))`;
      case "get_images":
        return `Array.from(document.querySelectorAll('img')).map(img => ({
          src: img.src,
          alt: img.alt,
          title: img.title
        }))`;
      case "get_forms":
        return `Array.from(document.querySelectorAll('form')).map(form => ({
          action: form.action,
          method: form.method,
          fields: Array.from(form.elements).map(el => ({
            name: el.name,
            type: el.type,
            value: el.value
          }))
        }))`;
      default: {
        // JSON-encode the selector so it is always a valid string literal,
        // and restrict the attribute to a plain identifier (falling back to
        // textContent) so no expression can be injected after `?.`.
        const safeSelector = JSON.stringify(String(selector ?? ""));
        const prop = /^[A-Za-z_$][\w$]*$/.test(attribute || "")
          ? attribute
          : "textContent";
        return `document.querySelector(${safeSelector})?.[${JSON.stringify(prop)}]`;
      }
    }
  }
}

Combine multiple browser tools for sophisticated AI workflows:

// Intelligent web research agent that combines browser tools with a
// zero-shot LangChain agent.
//
// Fix over the original: research() called this.getCurrentPageContext(),
// which was never defined; a minimal chrome.tabs.query implementation
// is provided.
class WebResearchAgent {
  constructor() {
    // Browser-capability tools the agent may invoke.
    this.tools = [
      new BrowserTextExtractionTool(),
      new BrowserHTMLAnalysisTool(),
      new BrowserNavigationTool(),
      new WebSearchTool(),
      new ContentSummarizerTool()
    ];
    this.agent = new AgentExecutor({
      agent: new ZeroShotAgent({
        llmChain: new LLMChain({
          llm: new ChatOpenAI({ temperature: 0 }), // deterministic tool selection
          prompt: this.createResearchPrompt()
        }),
        allowedTools: this.tools.map(tool => tool.name)
      }),
      tools: this.tools,
      verbose: true,
      maxIterations: 10 // hard stop to avoid runaway browsing loops
    });
  }

  /** Prompt guiding the agent's research loop. */
  createResearchPrompt() {
    // Template text is kept flush-left on purpose: template literals
    // preserve whitespace, so indenting would leak spaces into the prompt.
    return new PromptTemplate({
      template: `You are a web research assistant with access to browser tools.
Available tools:
{tools}
Use these tools to research the topic: {input}
Current page context: {page_context}
Research history: {agent_scratchpad}
Think step by step:
1. Analyze the current page for relevant information
2. Extract key data points
3. Navigate to related pages if needed
4. Synthesize findings into a comprehensive response
Question: {input}
{agent_scratchpad}`,
      inputVariables: ["input", "page_context", "agent_scratchpad", "tools"]
    });
  }

  /** URL and title of the active tab (empty strings when unavailable). */
  async getCurrentPageContext() {
    return new Promise((resolve) => {
      chrome.tabs.query({ active: true, currentWindow: true }, (tabs) => {
        const tab = tabs?.[0];
        resolve({ url: tab?.url ?? "", title: tab?.title ?? "" });
      });
    });
  }

  /**
   * Run the research agent for `query`, injecting the current page
   * context so the agent can reason about what the user is looking at.
   */
  async research(query, context = {}) {
    const pageContext = await this.getCurrentPageContext();
    return await this.agent.call({
      input: query,
      page_context: JSON.stringify(pageContext),
      ...context
    });
  }
}

Work within browser security constraints:

// CSP-compliant AI model access.
//
// Fix over the original: the constructor assigned the *promise* returned
// by the async detectAllowedEndpoint() to this.apiEndpoint and never
// awaited it, so callLLM could run before probing finished and
// this.fallbackMode was still false even when every endpoint was
// blocked. The probe result is now awaited inside callLLM before
// routing the request.
class SecureLLMProvider {
  constructor() {
    this.apiEndpoint = null;
    this.fallbackMode = false;
    // Kick off endpoint probing immediately; awaited lazily in callLLM().
    this.endpointProbe = this.detectAllowedEndpoint();
  }

  /**
   * Probe known LLM endpoints and return the first one reachable under
   * the page's Content-Security-Policy, or null (setting fallbackMode)
   * when all are blocked.
   * @returns {Promise<string|null>}
   */
  async detectAllowedEndpoint() {
    const endpoints = [
      'https://api.openai.com',
      'https://api.anthropic.com',
      'https://api.cohere.ai'
    ];
    for (const endpoint of endpoints) {
      try {
        await fetch(`${endpoint}/health`, { method: 'HEAD' });
        return endpoint;
      } catch (error) {
        console.log(`Endpoint ${endpoint} blocked by CSP`);
      }
    }
    this.fallbackMode = true;
    return null;
  }

  /**
   * Route an LLM call either directly to the detected endpoint or, when
   * CSP blocks every endpoint, through the extension background script.
   */
  async callLLM(prompt, options = {}) {
    this.apiEndpoint = await this.endpointProbe; // wait for probing to settle
    if (this.fallbackMode) {
      return await this.useBackgroundScript(prompt, options);
    }
    // NOTE(review): directAPICall is not defined in this snippet —
    // presumably implemented elsewhere; confirm.
    return await this.directAPICall(prompt, options);
  }

  /** Delegate the call to the background script, which is not page-CSP bound. */
  async useBackgroundScript(prompt, options) {
    return new Promise((resolve) => {
      chrome.runtime.sendMessage({
        type: 'LLM_CALL',
        prompt: prompt,
        options: options
      }, resolve);
    });
  }
}

Implement AI workflows that respect user privacy:

// Privacy-aware data processing.
//
// Fixes over the original:
//  - the redaction regexes had no `g` flag, so only the FIRST credit
//    card / email / phone number in the content was redacted;
//  - the email TLD class `[A-Z|a-z]` wrongly included the `|` character;
//  - this.llm was read in processContent but never assigned — it can now
//    be injected via the (optional, backward-compatible) constructor arg.
class PrivacyAwareProcessor {
  /**
   * @param {{ llm?: object }} [options] - optional LLM exposing .call(content)
   */
  constructor(options = {}) {
    this.llm = options.llm ?? null;
    // Patterns for data that must never leave the browser unredacted.
    this.sensitivePatterns = [
      /\b\d{4}[-\s]?\d{4}[-\s]?\d{4}[-\s]?\d{4}\b/g, // Credit cards
      /\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b/g, // Emails
      /\b\d{3}[-.]?\d{3}[-.]?\d{4}\b/g // Phone numbers
    ];
  }

  /**
   * Send `content` to the LLM, optionally redacting sensitive data first
   * and skipping result persistence when options.preservePrivacy is set.
   */
  async processContent(content, options = {}) {
    if (options.preservePrivacy) {
      content = this.sanitizeContent(content);
    }
    const result = await this.llm.call(content);
    if (options.preservePrivacy) {
      // Don't store sensitive results.
      return result;
    }
    // NOTE(review): storeResult is not defined in this snippet — confirm
    // it is implemented elsewhere.
    await this.storeResult(result);
    return result;
  }

  /** Replace every occurrence of each sensitive pattern with [REDACTED]. */
  sanitizeContent(content) {
    let sanitized = content;
    for (const pattern of this.sensitivePatterns) {
      sanitized = sanitized.replace(pattern, '[REDACTED]');
    }
    return sanitized;
  }
}

Implement streaming responses for better user experience:

// Streams LLM responses so the extension UI can render tokens as they
// arrive instead of waiting for the full completion.
class StreamingBrowserLLM {
  /**
   * @param {object} options - ChatOpenAI options; streaming is forced on.
   */
  constructor(options) {
    const llmConfig = {
      ...options,
      streaming: true,
      callbacks: [new StreamingCallbackHandler()]
    };
    this.llm = new ChatOpenAI(llmConfig);
  }

  /**
   * Stream a completion for `prompt`, invoking `onToken` for each chunk
   * and `onComplete` once the stream is exhausted.
   */
  async streamResponse(prompt, onToken, onComplete) {
    const tokenStream = await this.llm.stream(prompt);
    for await (const piece of tokenStream) {
      onToken(piece.content);
    }
    onComplete();
  }
}
// Usage in a browser extension: push tokens into the UI as they arrive,
// then finalize the display once the stream ends.
const streamingLLM = new StreamingBrowserLLM({
  temperature: 0.7,
  openAIApiKey: apiKey
});

const handleToken = (token) => {
  // Update UI progressively.
  updateResponseDisplay(token);
};
const handleDone = () => {
  // Finalize response.
  finalizeResponse();
};

await streamingLLM.streamResponse(
  "Analyze this web page content...",
  handleToken,
  handleDone
);

Implement intelligent caching for repeated operations:

// Browser-based LLM response cache (in-memory, TTL + FIFO eviction).
//
// Fixes over the original:
//  - generateKey used btoa(), which throws InvalidCharacterError for any
//    prompt containing non-Latin-1 characters (e.g. "日本語", "café");
//    the JSON string itself is a perfectly good Map key, so the base64
//    step is dropped;
//  - expired entries are now deleted on lookup instead of lingering
//    until size-based eviction happens to remove them;
//  - overwriting an existing key no longer evicts an unrelated entry.
class BrowserLLMCache {
  constructor() {
    this.cache = new Map(); // key -> { response, timestamp }
    this.maxSize = 100; // entry cap; oldest entry evicted first
    this.ttl = 3600000; // 1 hour
  }

  /** Deterministic cache key from the prompt/options pair (Unicode-safe). */
  generateKey(prompt, options) {
    return JSON.stringify({ prompt, options });
  }

  /** Cached response, or null when absent or expired. */
  async get(prompt, options) {
    const key = this.generateKey(prompt, options);
    const cached = this.cache.get(key);
    if (!cached) {
      return null;
    }
    if (Date.now() - cached.timestamp >= this.ttl) {
      this.cache.delete(key); // drop the stale entry eagerly
      return null;
    }
    return cached.response;
  }

  /** Store a response, evicting the oldest entry when at capacity. */
  async set(prompt, options, response) {
    const key = this.generateKey(prompt, options);
    if (!this.cache.has(key) && this.cache.size >= this.maxSize) {
      // Map preserves insertion order, so the first key is the oldest.
      const oldestKey = this.cache.keys().next().value;
      this.cache.delete(oldestKey);
    }
    this.cache.set(key, {
      response,
      timestamp: Date.now()
    });
  }
}

This comprehensive integration guide provides the foundation for building sophisticated AI workflows that leverage both LangChain’s capabilities and browser extension functionality while respecting browser security and performance constraints.