diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py
index 622c6fc..509cd87 100644
--- a/ldm/modules/attention.py
+++ b/ldm/modules/attention.py
@@ -16,6 +16,9 @@ try:
 except:
     XFORMERS_IS_AVAILBLE = False
 
+# CrossAttn precision handling
+import os
+_ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32")
 
 def exists(val):
     return val is not None
@@ -168,8 +171,11 @@ class CrossAttention(nn.Module):
         q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
 
         # force cast to fp32 to avoid overflowing
-        with torch.autocast(enabled=False, device_type = 'cuda'):
-            q, k = q.float(), k.float()
+        if _ATTN_PRECISION =="fp32":
+            with torch.autocast(enabled=False, device_type = 'cuda'):
+                q, k = q.float(), k.float()
+                sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+        else:
             sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
 
         del q, k
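
For reference, a minimal usage sketch (not part of the patch): since `_ATTN_PRECISION` is read from the environment once at module import time, `ATTN_PRECISION` must be set before `ldm.modules.attention` is imported. The layer sizes and tensor shapes below are illustrative, not taken from the repository.

```python
import os

# Opt out of the fp32 upcast; the default ("fp32") keeps the q/k einsum in fp32.
os.environ["ATTN_PRECISION"] = "fp16"

# The module caches ATTN_PRECISION at import time, so set the variable first.
import torch
from ldm.modules.attention import CrossAttention

attn = CrossAttention(query_dim=320, heads=8, dim_head=40)  # illustrative sizes
x = torch.randn(2, 64, 320)

# Under CUDA autocast, the similarity einsum now runs in the autocast dtype
# instead of being forced back to fp32.
if torch.cuda.is_available():
    attn, x = attn.cuda(), x.cuda()
    with torch.autocast(device_type="cuda", dtype=torch.float16):
        out = attn(x)
else:
    out = attn(x)  # on CPU the precision flag has no practical effect
print(out.shape)  # torch.Size([2, 64, 320])
```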