From ec6c48b8ff0688536f870cbaf571ee172b4b16bd Mon Sep 17 00:00:00 2001
From: Phil Wang
Date: Fri, 19 Jul 2024 10:00:03 -0700
Subject: [PATCH] norm not needed when reusing attention in lookvit

---
 setup.py                | 2 +-
 vit_pytorch/look_vit.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/setup.py b/setup.py
index 8518b7f..c815ca8 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@
 setup(
   name = 'vit-pytorch',
   packages = find_packages(exclude=['examples']),
-  version = '1.7.1',
+  version = '1.7.2',
   license='MIT',
   description = 'Vision Transformer (ViT) - Pytorch',
   long_description=long_description,
diff --git a/vit_pytorch/look_vit.py b/vit_pytorch/look_vit.py
index 2c1788c..3b7b27b 100644
--- a/vit_pytorch/look_vit.py
+++ b/vit_pytorch/look_vit.py
@@ -77,7 +77,7 @@ def __init__(
 
         self.split_heads = Rearrange('b n (h d) -> b h n d', h = heads)
 
-        self.norm = LayerNorm(dim)
+        self.norm = LayerNorm(dim) if not reuse_attention else nn.Identity()
 
         self.attend = nn.Softmax(dim = -1)
         self.dropout = nn.Dropout(dropout)
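
For context on the changed line in look_vit.py: an Attention block constructed with reuse_attention = True is fed attention weights computed by an earlier block instead of computing its own, and this patch drops the input LayerNorm for such blocks by swapping it for nn.Identity(). Below is a minimal sketch of that pattern, not the library's implementation: nn.LayerNorm stands in for look_vit's custom LayerNorm, and the SketchAttention module and its forward logic are illustrative assumptions.

import torch
from torch import nn
from einops.layers.torch import Rearrange

class SketchAttention(nn.Module):
    def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0., reuse_attention = False):
        super().__init__()
        dim_inner = heads * dim_head
        self.scale = dim_head ** -0.5
        self.reuse_attention = reuse_attention

        self.split_heads = Rearrange('b n (h d) -> b h n d', h = heads)
        self.merge_heads = Rearrange('b h n d -> b n (h d)')

        # the line touched by the patch: skip the pre-norm when attention is reused
        self.norm = nn.LayerNorm(dim) if not reuse_attention else nn.Identity()

        self.attend = nn.Softmax(dim = -1)
        self.dropout = nn.Dropout(dropout)

        self.to_v = nn.Linear(dim, dim_inner, bias = False)
        self.to_qk = nn.Linear(dim, dim_inner * 2, bias = False) if not reuse_attention else None
        self.to_out = nn.Linear(dim_inner, dim, bias = False)

    def forward(self, x, attn = None):
        x = self.norm(x)                      # identity when reuse_attention = True
        v = self.split_heads(self.to_v(x))

        if self.reuse_attention:
            # reuse attention weights handed in from an earlier block
            assert attn is not None, 'attn must be provided when reuse_attention = True'
        else:
            # compute fresh attention weights from the normed input
            q, k = self.to_qk(x).chunk(2, dim = -1)
            q, k = map(self.split_heads, (q, k))
            sim = torch.einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
            attn = self.attend(sim)

        out = torch.einsum('b h i j, b h j d -> b h i d', self.dropout(attn), v)
        return self.to_out(self.merge_heads(out)), attn

Usage sketch: a first block computes and returns attention weights, and a reuse_attention block consumes them without re-normalizing its input or re-projecting queries and keys.

x = torch.randn(1, 64, 256)
first = SketchAttention(dim = 256)
second = SketchAttention(dim = 256, reuse_attention = True)

out, attn = first(x)
out, _ = second(out, attn = attn)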