@@ -612,6 +612,78 @@ def patched_vectorstore_init_subclass(func, instance, args, kwargs):
612612 log .warning ("Unable to patch LangChain VectorStore class %s" , str (cls ))
613613
614614
def traced_runnable_lambda_operation(is_batch: bool = False):
    """Return a traced wrapper for synchronous RunnableLambda operations.

    The wrapper opens an APM span around the wrapped call (invoke/batch),
    records the instance for LLM Observability, tags the span with the
    call's inputs and output, and always finishes the span.
    """

    @with_traced_module
    def _traced_runnable_lambda_impl(langchain_core, pin, func, instance, args, kwargs):
        integration: LangChainIntegration = langchain_core._datadog_integration

        # Prefer a user-assigned runnable name; otherwise fall back to
        # "ClassName.method_name". Batch operations get a "_batch" suffix
        # only when a custom name exists.
        custom_name = getattr(instance, "name", None)
        fallback_name = f"{instance.__class__.__name__}.{func.__name__}"
        if custom_name:
            span_name = f"{custom_name}_batch" if is_batch else custom_name
        else:
            span_name = fallback_name

        span = integration.trace(pin, span_name, submit_to_llmobs=True, instance=instance)
        integration.record_instance(instance, span)

        result = None
        try:
            result = func(*args, **kwargs)
            return result
        except Exception:
            # Attach exception info to the span before propagating.
            span.set_exc_info(*sys.exc_info())
            raise
        finally:
            # Tag and close the span on both success and failure paths;
            # result stays None if the call raised.
            integration.llmobs_set_tags(span, args=args, kwargs=kwargs, response=result, operation="runnable_lambda")
            span.finish()

    return _traced_runnable_lambda_impl
649+
650+
def traced_runnable_lambda_operation_async(is_batch: bool = False):
    """Return a traced wrapper for asynchronous RunnableLambda operations.

    Async counterpart of ``traced_runnable_lambda_operation``: opens an APM
    span around the awaited call (ainvoke/abatch), records the instance for
    LLM Observability, tags the span with inputs/output, and always finishes
    the span.
    """

    @with_traced_module
    async def _traced_runnable_lambda_impl(langchain_core, pin, func, instance, args, kwargs):
        integration: LangChainIntegration = langchain_core._datadog_integration

        # Prefer a user-assigned runnable name; otherwise fall back to
        # "ClassName.method_name". Batch operations get a "_batch" suffix
        # only when a custom name exists.
        custom_name = getattr(instance, "name", None)
        fallback_name = f"{instance.__class__.__name__}.{func.__name__}"
        if custom_name:
            span_name = f"{custom_name}_batch" if is_batch else custom_name
        else:
            span_name = fallback_name

        span = integration.trace(pin, span_name, submit_to_llmobs=True, instance=instance)
        integration.record_instance(instance, span)

        result = None
        try:
            result = await func(*args, **kwargs)
            return result
        except Exception:
            # Attach exception info to the span before propagating.
            span.set_exc_info(*sys.exc_info())
            raise
        finally:
            # Tag and close the span on both success and failure paths;
            # result stays None if the awaited call raised.
            integration.llmobs_set_tags(span, args=args, kwargs=kwargs, response=result, operation="runnable_lambda")
            span.finish()

    return _traced_runnable_lambda_impl
685+
686+
615687def patch ():
616688 if getattr (langchain_core , "_datadog_patch" , False ):
617689 return
@@ -626,6 +698,7 @@ def patch():
626698 from langchain_core .language_models .chat_models import BaseChatModel
627699 from langchain_core .language_models .llms import BaseLLM
628700 from langchain_core .prompts .base import BasePromptTemplate
701+ from langchain_core .runnables .base import RunnableLambda
629702 from langchain_core .runnables .base import RunnableSequence
630703 from langchain_core .tools import BaseTool
631704 from langchain_core .vectorstores import VectorStore
@@ -651,6 +724,11 @@ def patch():
651724 wrap (RunnableSequence , "stream" , traced_chain_stream (langchain_core ))
652725 wrap (RunnableSequence , "astream" , traced_chain_stream (langchain_core ))
653726
727+ wrap (RunnableLambda , "invoke" , traced_runnable_lambda_operation (is_batch = False )(langchain_core ))
728+ wrap (RunnableLambda , "ainvoke" , traced_runnable_lambda_operation_async (is_batch = False )(langchain_core ))
729+ wrap (RunnableLambda , "batch" , traced_runnable_lambda_operation (is_batch = True )(langchain_core ))
730+ wrap (RunnableLambda , "abatch" , traced_runnable_lambda_operation_async (is_batch = True )(langchain_core ))
731+
654732 wrap (BasePromptTemplate , "invoke" , patched_base_prompt_template_invoke (langchain_core ))
655733 wrap (BasePromptTemplate , "ainvoke" , patched_base_prompt_template_ainvoke (langchain_core ))
656734
@@ -683,6 +761,10 @@ def unpatch():
683761 unwrap (langchain_core .runnables .base .RunnableSequence , "abatch" )
684762 unwrap (langchain_core .runnables .base .RunnableSequence , "stream" )
685763 unwrap (langchain_core .runnables .base .RunnableSequence , "astream" )
764+ unwrap (langchain_core .runnables .base .RunnableLambda , "invoke" )
765+ unwrap (langchain_core .runnables .base .RunnableLambda , "ainvoke" )
766+ unwrap (langchain_core .runnables .base .RunnableLambda , "batch" )
767+ unwrap (langchain_core .runnables .base .RunnableLambda , "abatch" )
686768 unwrap (langchain_core .language_models .chat_models .BaseChatModel , "stream" )
687769 unwrap (langchain_core .language_models .chat_models .BaseChatModel , "astream" )
688770 unwrap (langchain_core .language_models .llms .BaseLLM , "stream" )
0 commit comments