diff --git a/nemoguardrails/actions/action_dispatcher.py b/nemoguardrails/actions/action_dispatcher.py index 1a7d37bc55..c474d88115 100644 --- a/nemoguardrails/actions/action_dispatcher.py +++ b/nemoguardrails/actions/action_dispatcher.py @@ -23,8 +23,6 @@ from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union, cast -from langchain_core.runnables import Runnable - from nemoguardrails import utils from nemoguardrails.exceptions import LLMCallException @@ -218,11 +216,10 @@ async def execute_action( else: log.warning(f"Synchronous action `{action_name}` has been called.") - elif isinstance(fn, Runnable): - # If it's a Runnable, we invoke it as well - runnable = fn - - result = await runnable.ainvoke(input=params) + elif hasattr(fn, "ainvoke") and callable(fn.ainvoke): # type: ignore[union-attr] + # Duck-type check for LangChain Runnables (or any object + # with ainvoke) to avoid importing langchain in core. + result = await fn.ainvoke(input=params) # type: ignore[union-attr] else: # TODO: there should be a common base class here fn_run_func = getattr(fn, "run", None) diff --git a/nemoguardrails/eval/cli.py b/nemoguardrails/eval/cli.py index 04dc016f12..b943def304 100644 --- a/nemoguardrails/eval/cli.py +++ b/nemoguardrails/eval/cli.py @@ -19,8 +19,6 @@ from typing import List import typer -from langchain_community.cache import SQLiteCache -from langchain_core.globals import set_llm_cache from nemoguardrails.eval.check import LLMJudgeComplianceChecker from nemoguardrails.eval.eval import run_eval @@ -172,8 +170,14 @@ def check_compliance( if disable_llm_cache: console.print("[orange]Caching is disabled.[/]") else: - console.print("[green]Caching is enabled.[/]") - set_llm_cache(SQLiteCache(database_path=".langchain.db")) + try: + from langchain_community.cache import SQLiteCache + from langchain_core.globals import set_llm_cache + + set_llm_cache(SQLiteCache(database_path=".langchain.db")) + console.print("[green]Caching is enabled.[/]") + except ImportError: + console.print("[yellow]langchain not installed, LLM caching unavailable.[/]") console.print(f"Using eval configuration from {eval_config_path}.") console.print(f"Using output paths: {output_path}.") diff --git a/nemoguardrails/evaluate/evaluate_factcheck.py b/nemoguardrails/evaluate/evaluate_factcheck.py index c216f62b49..bbe6d99bc7 100644 --- a/nemoguardrails/evaluate/evaluate_factcheck.py +++ b/nemoguardrails/evaluate/evaluate_factcheck.py @@ -20,7 +20,6 @@ import tqdm import typer -from langchain_core.prompts import PromptTemplate from nemoguardrails import LLMRails from nemoguardrails.actions.llm.utils import llm_call @@ -28,6 +27,7 @@ from nemoguardrails.llm.prompts import Task from nemoguardrails.llm.taskmanager import LLMTaskManager from nemoguardrails.rails.llm.config import RailsConfig +from nemoguardrails.utils import get_or_create_event_loop class FactCheckEvaluation: @@ -89,13 +89,7 @@ def create_negative_samples(self, dataset): that it will not be grounded in the evidence passage. 
change details in the answer to make the answer wrong but yet believable.\nevidence: {evidence}\nanswer: {answer}\nincorrect answer:""" - create_negatives_prompt = PromptTemplate( template=create_negatives_template, input_variables=["evidence", "answer"], ) - - # Bind config parameters to the LLM for generating negative samples - llm_with_config = self.llm.bind(temperature=0.8, max_tokens=300) + loop = get_or_create_event_loop() print("Creating negative samples...") for data in tqdm.tqdm(dataset): @@ -103,13 +97,11 @@ evidence = data["evidence"] answer = data["answer"] - # Format the prompt and invoke the LLM directly - formatted_prompt = create_negatives_prompt.format(evidence=evidence, answer=answer) - negative_answer = llm_with_config.invoke(formatted_prompt) - if isinstance(negative_answer, str): - data["incorrect_answer"] = negative_answer.strip() - else: - data["incorrect_answer"] = negative_answer.content.strip() + formatted_prompt = create_negatives_template.format(evidence=evidence, answer=answer) + response = loop.run_until_complete( + self.llm.generate_async(formatted_prompt, temperature=0.8, max_tokens=300) + ) + data["incorrect_answer"] = response.content.strip() return dataset diff --git a/nemoguardrails/library/hallucination/actions.py b/nemoguardrails/library/hallucination/actions.py index 399cfbb148..2e897c45c5 100644 --- a/nemoguardrails/library/hallucination/actions.py +++ b/nemoguardrails/library/hallucination/actions.py @@ -17,8 +17,6 @@ import logging from typing import Optional -from langchain_core.prompts import PromptTemplate - from nemoguardrails import RailsConfig from nemoguardrails.actions import action from nemoguardrails.actions.llm.utils import ( @@ -56,8 +54,7 @@ async def self_check_hallucination( if bot_response and last_bot_prompt_string: num_responses = HALLUCINATION_NUM_EXTRA_RESPONSES - last_bot_prompt = PromptTemplate(template="{text}", input_variables=["text"]) - formatted_prompt = last_bot_prompt.format(text=last_bot_prompt_string) + formatted_prompt = last_bot_prompt_string async def _generate_extra_response(index: int) -> Optional[str]: llm_call_info_var.set(LLMCallInfo(task=Task.SELF_CHECK_HALLUCINATION.value))