Need an architecture for a conversation creator and evaluator crew

I need to create a crew of 3 agents:

  1. The first bot reads the transcription of an audio-recorded conversation between the customer and the support rep and mimics the customer.

  2. The second bot acts as the support rep and produces a solution.

  3. The third bot reads the question and the answer and judges whether the answer was relevant (this one will have both the audio transcript and the response).

I need the first and second bots to keep talking and produce a chat until the topic is complete (i.e. the customer query is resolved), and then pass the whole conversation to the third bot to check whether the answers provided were relevant.

But in the sequential setup, the first and second bots produce a single-turn conversation, pass that one turn to the evaluator, and the program exits.
I am not sure how to implement this with the hierarchical process either, since both bots need context from each other.
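
From the docs, I understand the hierarchical setup would look roughly like the snippet below (same agents and tasks as in my full program further down; manager_llm is my understanding of what Process.hierarchical requires). But I still don't see how the manager would make the first two bots alternate turns:

# Hierarchical variant (untested sketch): a manager LLM delegates the tasks,
# but there is no explicit back-and-forth loop between bot 1 and bot 2.
conversation_crew = Crew(
    agents=[customer_mimic, support_rep, evaluator],
    tasks=[task_customer_mimic, task_support_solution, task_evaluate_relevance],
    process=Process.hierarchical,
    manager_llm=llm,  # manager model that decides which agent works next
    verbose=True
)
result = conversation_crew.kickoff()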
Here is the full program I wrote (sequential):

import os
from crewai import Agent, Task, Crew, Process
from crewai import LLM
# Read the Google API key from the environment.
# Important: handle the key securely; do NOT hardcode it in source code.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY environment variable not set. Please set it.")

llm = LLM(model="gemini/gemini-2.0-flash", api_key=GOOGLE_API_KEY)

# --- Load Prompt from File ---
def load_file(filepath):
    """Reads the content of a file and returns it as a string."""
    try:
        with open(filepath, 'r', encoding='utf-8') as f:
            return f.read()
    except FileNotFoundError:
        print(f"Error: Prompt file not found at {filepath}")
        return None  # Important: Return None, don't try to continue with bad data
    except Exception as e:
        print(f"Error loading file: {e}")
        return None  # Important: Return None on other errors, for robust handling

# Specify the path to your prompt file
detailed_bot_prompt = load_file('./main-prompt.txt')
if detailed_bot_prompt is None:
    print("Failed to load main prompt.  Exiting.")
    exit()  # Exit if the prompt is critical

# --- Define the Agents ---
customer_mimic = Agent(
    role='Customer Issue Summarizer',
    goal="""Analyze the provided customer support conversation transcript.
          Extract and clearly articulate the customer's problem, and their overall sentiment or frustration level.
          Output should be a customer query to the support rep""",
    backstory="""You are an AI specialized in understanding customer perspectives from conversation transcripts.
                 Your strength lies in distilling the core issue and sentiment from the customer's words.
                 You need to act *as* the customer relaying the key information.""",
    verbose=True,
    allow_delegation=False,
    llm=llm
)

support_rep = Agent(
    role='Problem Solving Support Representative',
    goal="""Provide comprehensive assistance for Stage app autopay management (cancellation/modification) and refund requests, strictly following the defined procedural SOP.
            Respond in Hindi with light English, maintaining a compassionate, empathetic, and professional tone.
            Ensure customer satisfaction through efficient problem resolution within the defined scope.""",
    backstory=detailed_bot_prompt,
    verbose=True,
    allow_delegation=False,
    llm=llm
)

evaluator = Agent(
    role='Support Interaction Quality Analyst',
    goal="""Analyze the customer's issue (as summarized by the Customer Mimic)
          and the solution provided by the Support Representative.
          Determine if the support representative's response is relevant and directly addresses the core problem stated by the customer.
          Provide a clear verdict (Relevant/Not Relevant) and a brief justification.""",
    backstory="""You are an AI expert in evaluating the quality and relevance of customer support interactions.
                 You compare the problem statement with the proposed solution to ensure alignment and effectiveness.
                 Your focus is solely on the relevance of the answer to the question. you need to validate response basis this sop {detailed_bot_prompt}""",
    verbose=True,
    allow_delegation=False,
    llm=llm
)

# --- Define the Tasks ---
transcript = load_file('./transcript.txt') # Load transcript
if transcript is None:
    print("Failed to load transcript.  Exiting.")
    exit() # Exit if transcript load fails

task_customer_mimic = Task(
    description=f"""Process the following conversation transcript:
                  --- TRANSCRIPT START ---
                  {transcript}
                  --- TRANSCRIPT END ---
                  Identify the customer's main problem and their frustration.
                  Summarize this information, phrasing it as if you *are* the customer explaining the situation.
                  Focus on the core technical issue.""",
    expected_output="A concise summary of the customer's issue and context, written from the customer's perspective.",
    agent=customer_mimic
)

task_support_solution = Task(
    description="""Based on the customer's issue description provided in the context,
                  generate a helpful and actionable support response.
                  This could involve troubleshooting steps or suggesting the next logical action (e.g., escalation, technician).""",
    expected_output="A relevant support response providing troubleshooting steps or a clear next action.",
    agent=support_rep,
    context=[task_customer_mimic]
)

task_evaluate_relevance = Task(
    description="""Review the customer's issue summary and the support representative's response provided in the context.
                  Evaluate if the support response directly addresses the core problem outlined in the customer summary.
                  State clearly whether the response was 'Relevant' or 'Not Relevant' and provide a short reason.""",
    expected_output="A verdict (Relevant/Not Relevant) with a brief justification.",
    agent=evaluator,
    context=[task_customer_mimic, task_support_solution]
)

# --- Create and Run the Crew ---
support_analysis_crew = Crew(
    agents=[customer_mimic, support_rep, evaluator],
    tasks=[task_customer_mimic, task_support_solution, task_evaluate_relevance],
    process=Process.sequential,
    verbose=True
)

print("##################################################")
print("## Starting Support Interaction Analysis Crew... ##")
print("##################################################\n")

result = support_analysis_crew.kickoff()

print("\n##################################################")
print("## Crew Execution Finished!                   ##")
print("##################################################\n")

print("Customer Issue Summary (Agent 1 Output):")
if task_customer_mimic.output:
    print(task_customer_mimic.output)
else:
    print("No output from Agent 1")

print("\nSupport Rep Response (Agent 2 Output):")
if task_support_solution.output:
    print(task_support_solution.output)
else:
    print("No output from Agent 2")

print("\nRelevance Evaluation (Agent 3 Output):")
print(result)
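
What I was imagining instead is wrapping small single-turn crews in a plain Python loop: let bot 1 and bot 2 exchange messages turn by turn, stop when the topic looks complete, and only then run the evaluator once over the whole conversation. Below is an untested sketch of that idea, reusing the agents and transcript defined above; the RESOLVED end-marker, the MAX_TURNS cutoff, and building a fresh one-task crew per turn are all my own assumptions.

conversation = []        # accumulated (speaker, message) turns
last_rep_message = ""
MAX_TURNS = 8            # safety cutoff so the loop always terminates

for turn in range(MAX_TURNS):
    # Bot 1: speak as the customer, reacting to the rep's last message.
    customer_task = Task(
        description=f"""You are the customer from this transcript:
                      {transcript}
                      Conversation so far: {conversation}
                      Support rep's last message: {last_rep_message or '(none yet)'}
                      Write the customer's next message. If the issue is fully resolved,
                      end the message with the single word RESOLVED.""",
        expected_output="The customer's next message.",
        agent=customer_mimic
    )
    customer_msg = str(Crew(agents=[customer_mimic], tasks=[customer_task],
                            process=Process.sequential).kickoff())
    conversation.append(("customer", customer_msg))
    if "RESOLVED" in customer_msg:
        break

    # Bot 2: answer as the support rep, following the SOP.
    rep_task = Task(
        description=f"""Conversation so far: {conversation}
                      Respond to the customer's latest message according to the SOP.""",
        expected_output="The support rep's next message.",
        agent=support_rep
    )
    last_rep_message = str(Crew(agents=[support_rep], tasks=[rep_task],
                                process=Process.sequential).kickoff())
    conversation.append(("support_rep", last_rep_message))

# Bot 3: evaluate the full conversation once the loop has finished.
eval_task = Task(
    description=f"""Here is the full conversation between the customer and the support rep:
                  {conversation}
                  For each support response, state whether it was Relevant or Not Relevant
                  to the customer's message, with a brief justification.""",
    expected_output="A relevance verdict per support response, with a brief justification.",
    agent=evaluator
)
verdict = Crew(agents=[evaluator], tasks=[eval_task],
               process=Process.sequential).kickoff()
print(verdict)

Is a loop like this the intended way to get multi-turn exchanges between two agents, or does CrewAI have a built-in mechanism for this that I am missing?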