From 16a320df746ffd0c290766187b07c47d241a59dd Mon Sep 17 00:00:00 2001
From: Matthias Keck <matthias.keck@student.uni-halle.de>
Date: Mon, 31 Mar 2025 20:55:55 +0000
Subject: [PATCH] Edit bechmark_with_mem.py

---
 Attention-Benchmarking/bechmark_with_mem.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Attention-Benchmarking/bechmark_with_mem.py b/Attention-Benchmarking/bechmark_with_mem.py
index ba654e1..f32c5a0 100644
--- a/Attention-Benchmarking/bechmark_with_mem.py
+++ b/Attention-Benchmarking/bechmark_with_mem.py
@@ -114,7 +114,7 @@ for causal in causal_vals:
 
             results[config]["Pytorch"] = {"fwd_time": f, "bwd_time": b, "mem_used_MB": mem_used, "mem_peak_MB": mem_peak}
 
-            # xFormers
+            # FlashAttention with xFormers
             if xops is not None:
                 q, k, v = [torch.randn(batch_size, seqlen, nheads, headdim, device=device, dtype=dtype, requires_grad=True) for _ in range(3)]
                 f, b, mem_used, mem_peak = benchmark_with_memory(
-- 
GitLab
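
Note: the hunk above only shows the surrounding context of the xFormers benchmark path; the repository's benchmark_with_memory helper is not shown here. The following is a minimal, self-contained sketch (not the author's helper) of how peak GPU memory for the xFormers attention call could be measured with standard PyTorch CUDA memory APIs. Tensor shapes and dtype mirror the q/k/v construction in the hunk; the function name and default sizes are illustrative assumptions.

    # Hypothetical sketch, not Attention-Benchmarking/bechmark_with_mem.py itself.
    import torch

    try:
        import xformers.ops as xops
    except ImportError:
        xops = None

    def measure_peak_mem_mb(batch_size=2, seqlen=1024, nheads=8, headdim=64,
                            device="cuda", dtype=torch.float16):
        """Run one forward/backward pass of memory_efficient_attention and
        return the peak GPU memory used, in MB."""
        if xops is None or not torch.cuda.is_available():
            return None
        torch.cuda.reset_peak_memory_stats(device)
        # Same tensor layout as in the patched script: (batch, seqlen, nheads, headdim)
        q, k, v = [torch.randn(batch_size, seqlen, nheads, headdim,
                               device=device, dtype=dtype, requires_grad=True)
                   for _ in range(3)]
        out = xops.memory_efficient_attention(q, k, v)  # FlashAttention-style kernel
        out.sum().backward()                            # include backward-pass memory
        torch.cuda.synchronize(device)
        return torch.cuda.max_memory_allocated(device) / 2**20

    if __name__ == "__main__":
        print(f"peak memory: {measure_peak_mem_mb()} MB")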