traceloop / openllmetry-js

Sister project to OpenLLMetry, but in Typescript. Open-source observability for your LLM application, based on OpenTelemetry
https://www.traceloop.com/openllmetry
Apache License 2.0
266 stars 28 forks source link

No spans for LangChain #466

Closed codefromthecrypt closed 1 month ago

codefromthecrypt commented 1 month ago

Hi, I'm trying this app, but I only get the openai spans, not anything from langchain abstraction. Am I missing something?

// Load .env file, if any.
require('dotenv').config()

const traceloop = require('@traceloop/node-server-sdk');

// Reuse the OTEL_EXPORTER_OTLP_ENDPOINT variable instead of TRACELOOP_BASE_URL
const baseUrl = process.env.OTEL_EXPORTER_OTLP_ENDPOINT;
// Reuse the OTEL_SERVICE_NAME variable instead of TRACELOOP_APP_NAME
const appName = process.env.OTEL_SERVICE_NAME;

traceloop.initialize({baseUrl, appName, disableBatch: true});

const {ChatOpenAI, OpenAIEmbeddings} = require("@langchain/openai");
const {MemoryVectorStore} = require("langchain/vectorstores/memory");
const {ChatPromptTemplate} = require("@langchain/core/prompts");
const {StringOutputParser} = require("@langchain/core/output_parsers");
const {RunnablePassthrough, RunnableSequence} = require("@langchain/core/runnables");

async function main() {
    const llm = new ChatOpenAI({model: 'qwen2.5:0.5b'});
    const embeddings = new OpenAIEmbeddings({model: 'all-minilm:33m'});

    const monsters = [
        "Goblin: Weak but numerous, attacks in groups.",
        "Orc: Strong and aggressive, fights head-on.",
        "Skeleton: Undead warrior, immune to poison but fragile.",
        "Giant Spider: Webs players, poisonous bite.",
        "Dragon: Powerful and magical, breathes fire.",
        "Keegorg: Senior Solution Architect at Docker",
    ].map((pageContent) => ({ pageContent, metadata: {} }));

    const vectorStore = new MemoryVectorStore(embeddings);
    // Create embeddings for the monsters
    await vectorStore.addDocuments(monsters);

    // Retrieve only one monster
    const retriever = vectorStore.asRetriever(1);

    // Create prompt template
    const ANSWER_PROMPT = ChatPromptTemplate.fromTemplate(
        `You are a monster expert, and the context includes relevant monsters. Answer the user concisely only using the provided context. If you don't know the answer, just say that you don't know.

        context: {context}
        Question: "{question}"
        Answer:`
    );

    function onlyContent(docs) {
        return docs.map(doc => doc.page_content).join('\n\n');
    }

    const chain = RunnableSequence.from([
        {
            context: retriever.pipe(onlyContent),
            question: new RunnablePassthrough(),
        },
        ANSWER_PROMPT,
        llm,
        new StringOutputParser(),
    ]);

    // Pass the user's question to the sequence
    const response = await chain.invoke("Who is Keegorg?");
    console.log(response);
}

main();
{
  "name": "test-app",
  "version": "1.0.0",
  "private": true,
  "type": "commonjs",
  "engines": {
    "node": ">=16",
    "npm": ">=7"
  },
  "scripts": {
    "start": "node index.js"
  },
  "dependencies": {
    "openai": "^4.67.3",
    "langchain": "^0.3.2",
    "@langchain/core": "^0.3.11",
    "@langchain/openai": "^0.3.7",

    "dotenv": "^16.4.5",
    "@traceloop/node-server-sdk": "^0.11.1"
  }
}
nirga commented 1 month ago

Thanks @codefromthecrypt looking into this!

nirga commented 1 month ago

@codefromthecrypt so the reason is an issue we have with OpenTelemetry auto-instrumentation. The only way we're able to solve it as of now is by manually instrumenting the module, like this:

import * as RunnableModule from "@langchain/core/runnables";

traceloop.initialize({
  disableBatch: true,
  instrumentModules: {
    langchain: { runnablesModule: RunnableModule },
  },
});

For some reason, import-in-the-middle (which is used by OpenTelemetry for auto-instrumentation) doesn't catch some of langchain packages. I'll continue investigating this.

codefromthecrypt commented 1 month ago

no worries, thanks for the tip. I'll try it!

codefromthecrypt commented 1 month ago

fwiw, after trying this, I still have only one span (for the openai chat), so not multiple traces, just one with one span.

nirga commented 1 month ago

@codefromthecrypt weird, just ran you code and it works! Want to ping me on slack we can try and debug it!

codefromthecrypt commented 1 month ago

thanks, I'll do another check on own first, then ping you or close it out!

codefromthecrypt commented 1 month ago

you are correct it works! I think I messed up converting to commonjs. this works indeed:

const RunnableModule = require("@langchain/core/runnables");

traceloop.initialize({
  baseUrl, appName, disableBatch: true,
  instrumentModules: {
    langchain: { runnablesModule: RunnableModule },
  },
});
Screenshot 2024-10-16 at 12 58 35 PM