Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
0e24fe7
init
Bobain Nov 5, 2025
b4e00db
poetry
Bobain Nov 5, 2025
c082a72
python version
Bobain Nov 5, 2025
1ddead2
adding structured outputs
Bobain Nov 6, 2025
96d4f7d
trying to add criteria to model output
Bobain Nov 6, 2025
7a76c5a
trying to add criteres
Bobain Nov 6, 2025
ff944dc
strict structured output
Bobain Nov 6, 2025
6963d66
added a node for graph to explain what he expects
Bobain Nov 6, 2025
7855ee3
adding a message to make explicit the role of our AI
Bobain Nov 7, 2025
92a6ce6
adding a conditional edge to loop if no criteria met, or run queries …
Bobain Nov 7, 2025
8b1e3ed
added a specific node for routing, otherwise the extracted criteria w…
Bobain Nov 7, 2025
77c30ca
back to simplicity to try to make deployment work again
Bobain Nov 7, 2025
6f52a1b
re-adding other nodes
Bobain Nov 8, 2025
f96670b
adding end because deployment seems to be stuck in an infinite loop
Bobain Nov 8, 2025
caea5ea
Trying to break infinite loop
Bobain Nov 8, 2025
0ca374b
adding interrupt to stop the infinite loop
Bobain Nov 8, 2025
d1bf1ee
Revert "adding end because deployment seem to be stuck in infinite loop"
Bobain Nov 8, 2025
bd9fe58
let the loop happen in multi-turns
Bobain Nov 8, 2025
a2a6663
moved explanations from input expected from the model
Bobain Nov 8, 2025
77bc1dd
added placeholder for Tavily
Bobain Nov 8, 2025
7cd8e20
added results from Tavily
Bobain Nov 9, 2025
3fddd21
more prints, but still a strange graph viz
Bobain Nov 9, 2025
8dc01c0
remove checkpointer : stateless in multiturn
Bobain Nov 10, 2025
ec0afc0
retour au cahier des charges
Bobain Nov 12, 2025
1d18f3e
await
Bobain Nov 12, 2025
974821e
await
Bobain Nov 12, 2025
063bf58
added random for travels that cannot be distinguished + message from …
Bobain Nov 13, 2025
cbe3884
fix json string
Bobain Nov 13, 2025
96d180d
fix json string
Bobain Nov 13, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 0 additions & 6 deletions .env.example

This file was deleted.

3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -163,3 +163,6 @@ cython_debug/
#.idea/
uv.lock
.langgraph_api/


.idea
29 changes: 29 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
repos:
  # Code formatting — black with a 120-character line length.
  - repo: https://github.com/psf/black
    rev: 25.1.0
    hooks:
      - id: black
        args:
          - --line-length=120
  # Automatically strip unused imports/variables and expand star imports.
  - repo: https://github.com/PyCQA/autoflake
    rev: v2.3.1
    hooks:
      - id: autoflake
        args:
          - --in-place
          - --remove-all-unused-imports
          - --remove-unused-variables
          - --expand-star-imports
  # Generic hygiene checks; also forbid direct commits to main/master.
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
      - id: no-commit-to-branch
        args:
          - --branch
          - main
          - --branch
          - master
1 change: 1 addition & 0 deletions .python-version
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
3.11
3,438 changes: 3,438 additions & 0 deletions poetry.lock

Large diffs are not rendered by default.

9 changes: 7 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,14 +7,19 @@ authors = [
]
readme = "README.md"
license = { text = "MIT" }
requires-python = ">=3.9"
requires-python = ">=3.11,<3.12"
dependencies = [
"langgraph>=0.6.0",
"python-dotenv>=1.0.1",
"langchain>=0.3.0",
"langchain[tavily] (>=1.0.5,<2.0.0)",
"langchain-mistralai>=0.2.0",
# https://github.com/langchain-ai/react-agent/issues/26
"protobuf>=6.3.1",
"langgraph-cli[inmem] (>=0.4.7,<0.5.0)",
"grandalf (>=0.8,<0.9)",
"tavily (>=1.1.0,<2.0.0)",
"langchain-tavily (>=0.2.13,<0.3.0)",
"unidecode (>=1.4.0,<2.0.0)",
]


Expand Down
160 changes: 143 additions & 17 deletions src/agent/graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,84 @@
"""

from __future__ import annotations

from dataclasses import dataclass
from typing import Any, Dict
# To support Python < 3.12 which is used in LangGraph Docker image with langgraph up
from pydantic import BaseModel
import unidecode
from typing_extensions import TypedDict

from random import choice
from langchain.chat_models import init_chat_model
from langgraph.graph import StateGraph
from langgraph.runtime import Runtime
from langgraph.graph import StateGraph, START, END
import langsmith as ls # noqa: F401

# In-memory travel catalogue used as a stub search backend.
# Each entry carries:
#   "nom"                — display name of the travel offer (French),
#   "labels"             — accent-bearing French tags; they are matched against
#                          Criteres field names after unidecode normalisation,
#   "accessibleHandicap" — "oui"/"non", used as a hard accessibility filter.
OUTPUT_TRAVELS = [
    {"nom": "Randonnée camping en Lozère", "labels": ["sport", "montagne", "campagne"], "accessibleHandicap": "non"},
    {
        "nom": "5 étoiles à Chamonix option fondue",
        "labels": ["montagne", "détente"],
        "accessibleHandicap": "oui",
    },
    {
        "nom": "5 étoiles à Chamonix option ski",
        "labels": ["montagne", "sport"],
        "accessibleHandicap": "non",
    },
    {
        "nom": "Palavas de paillotes en paillotes",
        "labels": ["plage", "ville", "détente", "paillote"],
        "accessibleHandicap": "oui",
    },
    {
        "nom": "5 étoiles en rase campagne",
        "labels": ["campagne", "détente"],
        "accessibleHandicap": "oui",
    },
]


class Criteres(BaseModel):
    """Travel-search criteria extracted from the user's messages.

    Every field is tri-state: True (wanted), False (not wanted),
    None (not mentioned yet). Field names are unaccented on purpose so
    they can be compared with unidecode-normalised catalogue labels.
    """

    plage: bool | None = None
    montagne: bool | None = None
    ville: bool | None = None
    sport: bool | None = None
    detente: bool | None = None
    acces_handicap: bool | None = None


def match_criteria_and_travels(criteres: Criteres) -> dict:
    """Pick the catalogue travel that best matches the given criteria.

    Scoring: for every criterion (except ``acces_handicap``) whose name
    appears among a travel's unidecode-normalised labels, add +1 when the
    criterion is wanted (truthy) and -1 otherwise. ``acces_handicap`` acts
    as a hard filter instead: when truthy, only travels marked
    ``"accessibleHandicap": "oui"`` are considered. Ties between the current
    best travel and a challenger are broken at random.

    Args:
        criteres: Criteria to match; by the time this is called the caller
            has replaced unspecified (None) values with False.

    Returns:
        The best-matching entry of OUTPUT_TRAVELS. Note: the original
        annotation said ``-> str`` but a dict has always been returned.

    Raises:
        ValueError: if the accessibility filter leaves no candidate.
    """
    # Hard accessibility filter (not part of the score).
    if criteres.acces_handicap:
        candidates = [t for t in OUTPUT_TRAVELS if t["accessibleHandicap"] == "oui"]
    else:
        candidates = OUTPUT_TRAVELS

    if not candidates:
        # Guard: the original indexed with None in this case (TypeError).
        raise ValueError("No travel satisfies the accessibility constraint")

    best_index = None
    best_score = None
    for index, travel in enumerate(candidates):
        # Labels are accented in the catalogue; criteria names are not.
        labels = [unidecode.unidecode(label) for label in travel["labels"]]
        score = 0
        for criterion, wanted in criteres.model_dump().items():
            if criterion == "acces_handicap":
                continue  # handled above as a filter
            if criterion in labels:
                score += 1 if wanted else -1
        if best_score is None or score > best_score:
            best_score = score
            best_index = index
        elif score == best_score:
            # Random tie-break so indistinguishable travels alternate.
            best_index = choice([best_index, index])

    return candidates[best_index]


class MessageUnderstandable(BaseModel):
    """Structured yes/no verdict from the LLM on whether the user's
    message is understandable at all. None means the model did not answer."""

    answer: bool | None = None

class Context(TypedDict):
Expand All @@ -32,32 +101,89 @@ class State:
Defines the initial structure of incoming data.
See: https://langchain-ai.github.io/langgraph/concepts/low_level/#state
"""

last_user_message: str
ai_structured_output: Criteres | None = None
last_ai_message: str = ""
message_count: int = 0


async def call_model(state: State, runtime: Runtime[Context]) -> Dict[str, Any]:
async def chat_model(state: State):
    """Handle one user turn of the travel-search conversation.

    Pipeline:
      1. Ask the LLM whether the user's message is understandable at all.
      2. Extract travel criteria (Criteres) from the message.
      3. Merge them into the criteria accumulated across previous turns.
      4. Match the merged criteria against the travel catalogue and answer.

    Note: this node does not use the runtime context to alter behavior.

    Args:
        state: Current conversation state (last user message, accumulated
            structured criteria, message counter).

    Returns:
        A partial state-update dict consumed by LangGraph.
    """
    # see https://docs.mistral.ai/getting-started/models/models_overview/
    # Renamed local from `chat_model` to `model`: the original local
    # shadowed this function's own name.
    model = init_chat_model(model="codestral-2508", model_provider="mistralai")

    # Step 1: cheap comprehensibility gate before attempting extraction.
    res = await model.with_structured_output(MessageUnderstandable).ainvoke(
        f"Le message suivant est-il compréhensible? \n {state.last_user_message}"
    )

    if not res.answer:
        return {
            "last_user_message": state.last_user_message,
            "message_count": state.message_count + 1,
            "ai_structured_output": state.ai_structured_output,
            "last_ai_message": "Désolé je n'ai pas compris votre message",
        }

    # Step 2: extract the criteria mentioned in this message.
    criteres = await model.with_structured_output(Criteres).ainvoke(state.last_user_message)

    # BUG FIX: the original tested `res.model_dump()` (the understandability
    # model, whose single field is truthy at this point), so this greeting
    # branch could never fire. The intent is to introduce the assistant when
    # NO criterion at all was extracted from the message.
    if all(v is None for v in criteres.model_dump().values()):
        return {
            "last_user_message": state.last_user_message,
            "message_count": state.message_count + 1,
            "ai_structured_output": None,
            "last_ai_message": f"""Bonjour, je suis un assistant qui va vous aider à planifier votre prochain voyage.
Vous pouvez me parler naturellement, mais sachez que je vais limiter mes recherches de voyages aux critères suivants:
{', '.join(list(Criteres.model_fields.keys()))}
""",
        }

    if state.ai_structured_output is None:
        # First turn carrying criteria: criteria not mentioned default to False.
        for key, val in criteres.model_dump().items():
            if val is None:
                setattr(criteres, key, False)
        state.ai_structured_output = criteres
    else:
        # Later turns: only overwrite criteria explicitly mentioned now; a
        # criterion stated earlier is never reset to False by mere omission.
        for key, val in criteres.model_dump().items():
            if val is not None:
                setattr(state.ai_structured_output, key, val)

    # Step 4: answer with the best catalogue match and invite refinement.
    return {
        "last_user_message": state.last_user_message,
        "message_count": state.message_count + 1,
        "ai_structured_output": state.ai_structured_output,
        "last_ai_message": str(match_criteria_and_travels(state.ai_structured_output))
        + "\nVous pouvez préciser votre demande afin que je réponde mieux à vos attentes",
    }


# Define the graph: a single chat node wired between START and END. The
# multi-turn loop happens client-side by re-invoking the graph, so no cycle
# (and no checkpointer) is needed here.
# NOTE: the stale pre-change construction referencing the removed
# `call_model` node has been dropped.
builder = StateGraph(State, context_schema=Context)

builder.add_node(chat_model.__name__, chat_model)

builder.add_edge(START, chat_model.__name__)
builder.add_edge(chat_model.__name__, END)

graph = builder.compile(name="Travel searcher")


if __name__ == "__main__":
    # Manual smoke test: load API keys from .env and run a single turn.
    import asyncio

    from dotenv import load_dotenv

    load_dotenv()

    # Reuse the module-level compiled graph instead of compiling a second
    # "New Graph" instance that shadowed `graph` in the original script.
    state = State(last_user_message="Bonjour, j'aime la montagne")
    print(asyncio.run(graph.ainvoke(state)))