From 2765bd772d4d2e31d91adf9e93a379404fbd1fb5 Mon Sep 17 00:00:00 2001
From: Woongjun Choi
Date: Wed, 15 Jan 2025 16:42:28 +0900
Subject: [PATCH 1/2] Fix RuntimeError in swin_transformer_v2.py

---
 models/swin_transformer_v2.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/models/swin_transformer_v2.py b/models/swin_transformer_v2.py
index a429d0a2c..0bccdb8d3 100644
--- a/models/swin_transformer_v2.py
+++ b/models/swin_transformer_v2.py
@@ -153,7 +153,7 @@ def forward(self, x, mask=None):
         # cosine attention
         attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1))
-        logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. / 0.01))).exp()
+        logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. / 0.01).cuda())).exp()
         attn = attn * logit_scale
 
         relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads)

From 0a932a40c37afc5c1d8cfce7ff175ee780349e3e Mon Sep 17 00:00:00 2001
From: Woongjun Choi
Date: Sat, 18 Jan 2025 14:33:28 +0900
Subject: [PATCH 2/2] Fix swin_transformer_v2.py RuntimeError (device mismatch in clamp)

Error:
RuntimeError: Expected all tensors to be on the same device, but found at
least two devices, cuda:0 and cpu! (when checking argument for argument max
in method wrapper_CUDA_clamp_Tensor)

Before:
logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. / 0.01).cuda())).exp()

After:
logit_scale_device = self.logit_scale.device
logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. / 0.01).to(logit_scale_device))).exp()
---
 models/swin_transformer_v2.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/models/swin_transformer_v2.py b/models/swin_transformer_v2.py
index 0bccdb8d3..4cdc4eb69 100644
--- a/models/swin_transformer_v2.py
+++ b/models/swin_transformer_v2.py
@@ -153,7 +153,8 @@ def forward(self, x, mask=None):
         # cosine attention
         attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1))
-        logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. / 0.01).cuda())).exp()
+        logit_scale_device = self.logit_scale.device
+        logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. / 0.01).to(logit_scale_device))).exp()
         attn = attn * logit_scale
 
         relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads)
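
Note: a minimal standalone sketch of the device-safe pattern the second patch
applies (variable names here are illustrative and not part of the patch;
assumes only PyTorch):

    import torch

    # Learnable log-temperature, as in SwinV2 cosine attention; it may live
    # on CPU or any CUDA device depending on where the model was moved.
    logit_scale = torch.nn.Parameter(torch.log(10 * torch.ones(1)))

    # Build the clamp bound on the same device as the parameter so that
    # torch.clamp never mixes CPU and CUDA tensors.
    max_bound = torch.log(torch.tensor(1. / 0.01, device=logit_scale.device))
    scale = torch.clamp(logit_scale, max=max_bound).exp()

Since 1. / 0.01 is a constant, an alternative is to pass a plain Python float,
e.g. torch.clamp(logit_scale, max=math.log(1. / 0.01)), which torch.clamp
accepts for min/max and which sidesteps device placement entirely.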