# Version string; printed immediately at import time, before anything else runs.
VERSION = "0.0.3 10-Dec-2024"
print(f"Debate Simulator v{VERSION}")

"""
Created by Craig Shallahamer, craig@orapub.com / craig@viscosityna.com
No warranties, express or implied. It's all on you.

This is a simple debate simulator between two simple agents.
The agents are created with a role prompt and a message history.
The message history is used to create a prompt for the LLM.
The LLM is used to generate a response for the agent.
The response is added to the message history.
The debate continues until the maximum number of turns is reached.

Note: The agents only access data from the LLM and do not have access to any external data or other systems.

-------------------

Here is how to create the Python environment and launch the debate:

1. Create the config.py file with your Cohere API key

This is all you need in the config.py file: cohere_api_key = "abc123"


2. Download Miniconda at https://docs.conda.io/en/latest/miniconda.html


3. Create Python environment

#conda deactivate
#conda env list
#conda env remove --name debate
conda create --name debate python=3.10
conda activate debate
pip install cohere asyncio

4. How To Launch The AI On Laptop

cd "/Users/cshallah/OraPub/0-Blog-Wider-View/HTML-new/20241210/Agent-Debate/debate_simulator"
conda activate debate
python main.py

"""

# Settings to control the debate
MAX_TURNS = 3            # number of rebuttal rounds after the opening statements
MAX_RESPONSE_LENGTH = 20  # word limit injected into every LLM prompt via PRE_PROMPT
# Debate Topics and Roles
option_number = 3  # Change this to select different debates (1, 2, or 3)

if option_number == 1:
    # Cloud Computing VS On-premises Infrastructure
    INITIAL_PROMPT = """Discuss the following topic: Is Cloud Computing Better Than On-Premises Infrastructure?
    Present your perspective and respond to your opponent's arguments respectfully and factually. 
    Do not repeat the same arguments.""" 

    ADVOCATE = """You are a three-year CIO with an MBA from Harvard who strongly believes in the benefits of cloud computing. 
    Cloud computing is the future: it provides unmatched scalability, cost efficiency, and accessibility.
    Companies can innovate faster without worrying about hardware maintenance.
    Security advancements from cloud providers often surpass on-premises solutions.
    It enables businesses to focus on growth while reducing operational overhead and achieving global reach seamlessly.
    Base your arguments on empirical data and public research."""

    SKEPTIC = """You are a fifteen-year CIO with an MBA from USC who strongly believes in the benefits of On-premises infrastructure.
    Cloud computing sacrifices control and security for convenience.
    On-premises infrastructure ensures critical data stays in-house, reducing exposure to third-party risks.
    Long-term cloud costs can skyrocket, and downtime is at the mercy of providers.
    Businesses relying on cloud providers risk vendor lock-in and unpredictable service interruptions.
    Base your arguments on empirical data and public research."""

elif option_number == 2:
    # Airbnb Rating Dilemma
    INITIAL_PROMPT = """Discuss the following topic: Is it better to rate a shockingly average Airbnb stay
    with a 5 star review to help support an elderly person? Present your perspective and respond to your
    opponent's arguments respectfully and factually.""" 

    ADVOCATE = """You are a frequent Airbnb guest who strongly believes in helping the elderly"""
    SKEPTIC = """You are an Airbnb host who strongly believes in honesty and integrity."""

elif option_number == 3:
    # AI Novel Solutions Debate
    INITIAL_PROMPT = """Discuss the following topic: Are novel solutions a distinctly human trait that AI can
    never match? Present your perspective and respond to your opponent's arguments respectfully and factually.
    Do not repeat the same arguments.""" 

    ADVOCATE = """You firmly believe that novel solutions are a distinctly human trait that AI can never match.
    Base your arguments on demonstrations of AI's inability to solve novel problems that humans have solved."""

    SKEPTIC = """You firmly believe that AI can match human novel solutions.
    Base your arguments on demonstrations of AI's ability to solve novel problems."""

else:
    # Fail fast with a clear message instead of a NameError later when
    # INITIAL_PROMPT / ADVOCATE / SKEPTIC are first referenced.
    raise ValueError(f"Invalid option_number {option_number!r}: must be 1, 2, or 3")


# -----------------
# You should not need to change anything below this line
# -----------------

# Settings
llm_model_good = "command-r-plus-08-2024"
PRE_PROMPT = f"Limit your response to {MAX_RESPONSE_LENGTH} words."

# Import libraries
import asyncio
import cohere
from dataclasses import dataclass
from typing import List, Optional
import textwrap
import re

try:
    from config import cohere_api_key
except ImportError:
    raise ImportError("Please create a config.py file with your cohere_api_key")

# Helper functions

def clean_text(text: str) -> str:
    """Collapse all runs of whitespace in *text* to single spaces and strip the ends.

    Note: ``\\s`` matches newlines and tabs too, so one substitution flattens
    the text onto a single logical line.  (The original code had a second
    ``re.sub(r'\\n\\s+', '\\n', ...)`` pass, but it was dead code — no newline
    can survive the first substitution.)
    """
    return re.sub(r'\s+', ' ', text).strip()

def pretty_print(text: str, width: int = 80, prefix: str = "") -> None:
    """Word-wrap *text* to *width* columns and print it, preceded by a blank line.

    Whitespace is normalized first via ``clean_text``; *prefix* is prepended
    to every output line (used by callers to indent agent responses).
    """
    cleaned = clean_text(text)
    print()
    for row in textwrap.wrap(cleaned, width=width,
                             initial_indent=prefix, subsequent_indent=prefix):
        print(row)


# Main Classes and Functions

@dataclass
class Message:
    """A single utterance stored in an agent's transcript."""
    # Speaker label recorded in the history (the code only ever stores "assistant").
    role: str
    # The text of the utterance.
    content: str

class DebateAgent:
    """One side of the debate.

    Keeps a transcript of its own prior responses and queries the Cohere
    LLM to produce each new statement.
    """

    def __init__(self, name: str, role_prompt: str):
        """Create an agent with a display *name* and a persona *role_prompt*."""
        self.name = name
        self.role_prompt = role_prompt
        self.message_history: List[Message] = []
        self.co_async = cohere.AsyncClientV2(cohere_api_key)

    async def respond(self, opponent_message: Optional[str] = None) -> str:
        """Generate and record the agent's next statement.

        The prompt combines the debate topic, this agent's persona, its own
        prior messages, and (if given) the opponent's latest message.  On any
        API failure an error string is returned instead of raising, and the
        failed turn is NOT added to the transcript.
        """
        # Assemble the prompt piecewise: topic + persona, own history,
        # opponent's last message, then the instruction to respond.
        parts = [f"{INITIAL_PROMPT}\n\nYou are {self.name}. {self.role_prompt}\n\n"]
        parts.extend(f"{msg.role}: {msg.content}\n" for msg in self.message_history)
        if opponent_message:
            parts.append(f"\nOpponent's last message: {opponent_message}\n")
        parts.append("\nProvide your response:")

        # The word-limit instruction goes first so the model sees it up front.
        super_prompt = f"{PRE_PROMPT}\n" + "".join(parts)

        try:
            reply = await self.co_async.chat(
                model=llm_model_good,
                temperature=0.7,  # some variability between turns
                messages=[{"role": "user", "content": super_prompt}],
            )
            text = reply.message.content[0].text.strip()
            self.message_history.append(Message("assistant", text))
            return text
        except Exception as e:
            # Best-effort: surface the failure as the "response" text.
            return f"Error generating response: {str(e)}"

class DebateManager:
    """Orchestrates turn-taking between the debate agents."""

    def __init__(self, agents: List[DebateAgent]):
        self.agents = agents
        self.current_turn = 0  # advances once per rebuttal round

    async def run_debate(self):
        """Run opening statements, then MAX_TURNS rounds of rebuttal."""
        pretty_print("\n=== Debate Starting ===\n")
        pretty_print(f"Topic: {INITIAL_PROMPT}\n")

        # Opening statements: nobody has spoken yet, so no opponent message.
        for agent in self.agents:
            opening = await agent.respond()
            pretty_print(f"\n{agent.name}: {opening}\n", prefix="  ")

        # Rebuttal rounds.
        while self.current_turn < MAX_TURNS:
            self.current_turn += 1
            pretty_print(f"\n--- Turn {self.current_turn} ---\n")

            for idx, agent in enumerate(self.agents):
                # idx - 1 wraps to -1 for the first agent, so with two agents
                # each one always rebuts the other's most recent message.
                opponent = self.agents[idx - 1]
                last_said = (opponent.message_history[-1].content
                             if opponent.message_history else None)
                reply = await agent.respond(last_said)
                pretty_print(f"{agent.name}: {reply}\n", prefix="  ")

            # Brief pause between rounds.
            await asyncio.sleep(1)

        pretty_print("\n=== Debate Concluded ===\n")

async def main():
    """Build the two personas and run the debate to completion."""
    contenders = [
        DebateAgent(name="Advocate", role_prompt=ADVOCATE),
        DebateAgent(name="Skeptic", role_prompt=SKEPTIC),
    ]
    await DebateManager(contenders).run_debate()

# Entry point: run the async debate loop when executed as a script.
if __name__ == "__main__":
    asyncio.run(main()) 