Tuesday, April 14, 2026

Adversarial AI Attack ( elsa )

 import numpy as np


# --- Agent A: simple classifier ---

class AgentA:
    """Agent A: a toy sequence classifier.

    Simplified rule: any lowercase character is treated as noise,
    which is exactly the weakness the adversarial input exploits.
    """

    def classify(self, sequence):
        """Return "DNS" for all-uppercase sequences, "DDN" otherwise."""
        for ch in sequence:
            if ch.islower():
                # Wrong output deliberately triggered by adversarial noise.
                return "DDN"
        return "DNS"


# --- Agent B: makes a decision based on A's output ---

class AgentB:
    """Agent B: turns Agent A's classification into a decision."""

    def decide(self, classification):
        """Map a classification label to a decision string.

        Unrecognized labels fall back to "Unknown".
        """
        outcomes = {
            "DNS": "Proceed with analysis",
            "DDN": "Abort: incompatible sequence",
        }
        return outcomes.get(classification, "Unknown")


# --- Multi-agent pipeline ---

def pipeline(sequence):
    """Run the two-agent pipeline on *sequence*.

    Agent A classifies the sequence, then Agent B decides what to do
    with that classification. Returns (classification, decision).
    """
    classification = AgentA().classify(sequence)
    decision = AgentB().decide(classification)
    return classification, decision


# --- Entrada original (correta) ---

original = "ATG-CGA-TTC"

print(pipeline(original))

# Output esperado: ('DNS', 'Proceed with analysis')


# --- Entrada adulterada (adversarial) ---

adversarial = "ATG-CgA-TTC"  # <-- minúscula introduzida

print(pipeline(adversarial))

# Output incorreto: ('DDN', 'Abort: incompatible sequence')












No comments:

Adversarial AI Attack ( elsa )

 import numpy as np # --- Agente A: Classificador simples --- class AgentA:     def classify(self, sequence):         # Regra simplificada: ...