refactor: Implement LLM tracing callback to improve parsing of tokens usage stats (#9311)

Signed-off-by: Oleg Ivaniv <me@olegivaniv.com>
OlegIvaniv committed May 12, 2024
1 parent 2445205 commit 359ade4
Showing 19 changed files with 282 additions and 111 deletions.
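
The change is the same across every language-model node in this commit: the model instance is no longer wrapped in the `logWrapper` proxy for logging; instead each node passes a new `N8nLlmTracing` callback handler through LangChain's `callbacks` option and returns the model directly. The handler itself lives in N8nLlmTracing.ts, which is among the 19 changed files but not shown in this excerpt. As orientation only, a minimal sketch of such a LangChain callback handler could look like the following; the class body, the default parser, and the logging target here are assumptions, not the actual implementation:

// Minimal sketch only - the real N8nLlmTracing.ts from this commit is not shown in this excerpt.
import { BaseCallbackHandler } from '@langchain/core/callbacks/base';
import type { LLMResult } from '@langchain/core/outputs';
import type { IExecuteFunctions } from 'n8n-workflow';

type TokensUsage = { completionTokens: number; promptTokens: number; totalTokens: number };
type TokensUsageParser = (llmOutput: LLMResult['llmOutput']) => TokensUsage;

// LangChain's default shape: llmOutput.tokenUsage with camelCase fields.
const defaultTokensUsageParser: TokensUsageParser = (llmOutput) => {
	const usage = (llmOutput?.tokenUsage as Partial<TokensUsage>) ?? {};
	return {
		completionTokens: usage.completionTokens ?? 0,
		promptTokens: usage.promptTokens ?? 0,
		totalTokens: usage.totalTokens ?? 0,
	};
};

export class N8nLlmTracing extends BaseCallbackHandler {
	name = 'N8nLlmTracing';

	constructor(
		private executionContext: IExecuteFunctions, // each node passes `this`
		private options: { tokensUsageParser?: TokensUsageParser } = {},
	) {
		super();
	}

	async handleLLMEnd(output: LLMResult) {
		const parse = this.options.tokensUsageParser ?? defaultTokensUsageParser;
		const tokensUsage = parse(output.llmOutput);
		// The real handler presumably records this on the node's execution data;
		// console logging is a stand-in here.
		console.log(`${this.name}:`, tokensUsage);
	}
}

The `tokensUsageParser` option matters because providers disagree on where and how token counts appear in `llmOutput`, as the Anthropic node below shows.
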
@@ -9,8 +9,9 @@ import {
 } from 'n8n-workflow';
 
 import { ChatAnthropic } from '@langchain/anthropic';
-import { logWrapper } from '../../../utils/logWrapper';
+import type { LLMResult } from '@langchain/core/outputs';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 
 const modelField: INodeProperties = {
 	displayName: 'Model',
@@ -166,17 +167,29 @@ export class LmChatAnthropic implements INodeType {
 			topP: number;
 		};
 
+		const tokensUsageParser = (llmOutput: LLMResult['llmOutput']) => {
+			const usage = (llmOutput?.usage as { input_tokens: number; output_tokens: number }) ?? {
+				input_tokens: 0,
+				output_tokens: 0,
+			};
+			return {
+				completionTokens: usage.output_tokens,
+				promptTokens: usage.input_tokens,
+				totalTokens: usage.input_tokens + usage.output_tokens,
+			};
+		};
 		const model = new ChatAnthropic({
 			anthropicApiKey: credentials.apiKey as string,
 			modelName,
 			maxTokens: options.maxTokensToSample,
 			temperature: options.temperature,
 			topK: options.topK,
 			topP: options.topP,
+			callbacks: [new N8nLlmTracing(this, { tokensUsageParser })],
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
 		};
 	}
 }

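Anthropic is the only node in this excerpt that supplies a custom `tokensUsageParser`: its `llmOutput.usage` uses snake_case `input_tokens`/`output_tokens` fields and carries no precomputed total, unlike the camelCase `tokenUsage` shape many other providers report. With made-up numbers, the parser above maps:

// Illustrative values only.
tokensUsageParser({ usage: { input_tokens: 12, output_tokens: 34 } });
// => { completionTokens: 34, promptTokens: 12, totalTokens: 46 }
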
@@ -9,9 +9,9 @@ import {
 
 import type { ChatOllamaInput } from '@langchain/community/chat_models/ollama';
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
-import { logWrapper } from '../../../utils/logWrapper';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
 import { ollamaModel, ollamaOptions, ollamaDescription } from '../LMOllama/description';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmChatOllama implements INodeType {
 	description: INodeTypeDescription = {
@@ -62,10 +62,11 @@ export class LmChatOllama implements INodeType {
 			baseUrl: credentials.baseUrl as string,
 			model: modelName,
 			format: options.format === 'default' ? undefined : options.format,
+			callbacks: [new N8nLlmTracing(this)],
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
 		};
 	}
 }

@@ -8,8 +8,8 @@ import {
 } from 'n8n-workflow';
 
 import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
-import { logWrapper } from '../../../utils/logWrapper';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmChatOpenAi implements INodeType {
 	description: INodeTypeDescription = {
@@ -247,6 +247,7 @@ export class LmChatOpenAi implements INodeType {
 			timeout: options.timeout ?? 60000,
 			maxRetries: options.maxRetries ?? 2,
 			configuration,
+			callbacks: [new N8nLlmTracing(this)],
 			modelKwargs: options.responseFormat
 				? {
 						response_format: { type: options.responseFormat },
@@ -255,7 +256,7 @@
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
 		};
 	}
 }

@@ -8,8 +8,8 @@ import {
 } from 'n8n-workflow';
 
 import { Cohere } from '@langchain/cohere';
-import { logWrapper } from '../../../utils/logWrapper';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmCohere implements INodeType {
 	description: INodeTypeDescription = {
@@ -97,10 +97,11 @@ export class LmCohere implements INodeType {
 		const model = new Cohere({
 			apiKey: credentials.apiKey as string,
 			...options,
+			callbacks: [new N8nLlmTracing(this)],
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
 		};
 	}
 }

@@ -8,8 +8,8 @@ import {
 } from 'n8n-workflow';
 
 import { Ollama } from '@langchain/community/llms/ollama';
-import { logWrapper } from '../../../utils/logWrapper';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 import { ollamaDescription, ollamaModel, ollamaOptions } from './description';
 
 export class LmOllama implements INodeType {
@@ -60,10 +60,11 @@ export class LmOllama implements INodeType {
 			baseUrl: credentials.baseUrl as string,
 			model: modelName,
 			...options,
+			callbacks: [new N8nLlmTracing(this)],
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
 		};
 	}
 }

@@ -9,8 +9,8 @@ import type {
 } from 'n8n-workflow';
 
 import { OpenAI, type ClientOptions } from '@langchain/openai';
-import { logWrapper } from '../../../utils/logWrapper';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 
 type LmOpenAiOptions = {
 	baseURL?: string;
@@ -240,10 +240,11 @@ export class LmOpenAi implements INodeType {
 			configuration,
 			timeout: options.timeout ?? 60000,
 			maxRetries: options.maxRetries ?? 2,
+			callbacks: [new N8nLlmTracing(this)],
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
 		};
 	}
 }

@@ -8,8 +8,8 @@ import {
 } from 'n8n-workflow';
 
 import { HuggingFaceInference } from '@langchain/community/llms/hf';
-import { logWrapper } from '../../../utils/logWrapper';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmOpenHuggingFaceInference implements INodeType {
 	description: INodeTypeDescription = {
@@ -141,10 +141,11 @@ export class LmOpenHuggingFaceInference implements INodeType {
 			model: modelName,
 			apiKey: credentials.apiKey as string,
 			...options,
+			callbacks: [new N8nLlmTracing(this)],
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
 		};
 	}
 }

@@ -7,12 +7,12 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 import { BedrockChat } from '@langchain/community/chat_models/bedrock';
-import { logWrapper } from '../../../utils/logWrapper';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
 // Dependencies needed underneath the hood. We add them
 // here only to track where what dependency is used
 import '@aws-sdk/credential-provider-node';
 import '@aws-sdk/client-bedrock-runtime';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmChatAwsBedrock implements INodeType {
 	description: INodeTypeDescription = {
@@ -152,10 +152,11 @@ export class LmChatAwsBedrock implements INodeType {
 				accessKeyId: credentials.accessKeyId as string,
 				sessionToken: credentials.sessionToken as string,
 			},
+			callbacks: [new N8nLlmTracing(this)],
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
 		};
 	}
 }

@@ -9,8 +9,8 @@ import {
 
 import type { ClientOptions } from '@langchain/openai';
 import { ChatOpenAI } from '@langchain/openai';
-import { logWrapper } from '../../../utils/logWrapper';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmChatAzureOpenAi implements INodeType {
 	description: INodeTypeDescription = {
@@ -160,10 +160,11 @@ export class LmChatAzureOpenAi implements INodeType {
 			timeout: options.timeout ?? 60000,
 			maxRetries: options.maxRetries ?? 2,
 			configuration,
+			callbacks: [new N8nLlmTracing(this)],
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
 		};
 	}
 }

@@ -8,8 +8,8 @@ import {
 } from 'n8n-workflow';
 import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
 import type { HarmBlockThreshold, HarmCategory, SafetySetting } from '@google/generative-ai';
-import { logWrapper } from '../../../utils/logWrapper';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 import { harmCategories, harmThresholds } from './options';
 
 export class LmChatGoogleGemini implements INodeType {
@@ -224,10 +224,11 @@ export class LmChatGoogleGemini implements INodeType {
 			temperature: options.temperature,
 			maxOutputTokens: options.maxOutputTokens,
 			safetySettings,
+			callbacks: [new N8nLlmTracing(this)],
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
 		};
 	}
 }

@@ -7,8 +7,8 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 import { ChatGooglePaLM } from '@langchain/community/chat_models/googlepalm';
-import { logWrapper } from '../../../utils/logWrapper';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmChatGooglePalm implements INodeType {
 	description: INodeTypeDescription = {
@@ -156,10 +156,11 @@ export class LmChatGooglePalm implements INodeType {
 			apiKey: credentials.apiKey as string,
 			modelName,
 			...options,
+			callbacks: [new N8nLlmTracing(this)],
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
 		};
 	}
 }

@@ -8,8 +8,8 @@ import {
 } from 'n8n-workflow';
 
 import { ChatGroq } from '@langchain/groq';
-import { logWrapper } from '../../../utils/logWrapper';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmChatGroq implements INodeType {
 	description: INodeTypeDescription = {
@@ -142,10 +142,11 @@ export class LmChatGroq implements INodeType {
 			modelName,
 			maxTokens: options.maxTokensToSample,
 			temperature: options.temperature,
+			callbacks: [new N8nLlmTracing(this)],
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
 		};
 	}
 }

@@ -9,8 +9,8 @@ import {
 
 import type { ChatMistralAIInput } from '@langchain/mistralai';
 import { ChatMistralAI } from '@langchain/mistralai';
-import { logWrapper } from '../../../utils/logWrapper';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmChatMistralCloud implements INodeType {
 	description: INodeTypeDescription = {
@@ -188,10 +188,11 @@ export class LmChatMistralCloud implements INodeType {
 			apiKey: credentials.apiKey as string,
 			modelName,
 			...options,
+			callbacks: [new N8nLlmTracing(this)],
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
 		};
 	}
 }

@@ -7,8 +7,8 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 import { GooglePaLM } from '@langchain/community/llms/googlepalm';
-import { logWrapper } from '../../../utils/logWrapper';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmGooglePalm implements INodeType {
 	description: INodeTypeDescription = {
@@ -163,10 +163,11 @@ export class LmGooglePalm implements INodeType {
 			apiKey: credentials.apiKey as string,
 			modelName,
 			...options,
+			callbacks: [new N8nLlmTracing(this)],
 		});
 
 		return {
-			response: logWrapper(model, this),
+			response: model,
		};
	}
}
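
All of the nodes after the Anthropic one apply the same two-line pattern; schematically (`SomeChatModel` stands in for the provider-specific class and is not a real import):

const model = new SomeChatModel({
	// ...provider-specific options...
	callbacks: [new N8nLlmTracing(this)], // tracing callback replaces the logWrapper proxy
});

return {
	response: model, // previously: logWrapper(model, this)
};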
