Commit 5e7d7f35 authored by Meet Narendra

Added logging for loss debugging

parent 45de1076
@@ -56,6 +56,13 @@ class Loss:
         @params
         Author: @jiteshg
         '''
+        loss_t = 0.0
+        a = 0.0
+        b = 0.0
         for gen,cont,sty in zip(content_fmap_gen,cont_fmap_real,style_fmap_real):
-            loss_t = alpha*Loss.content_loss(cont,gen) + beta*Loss.style_loss(sty,gen)
-        return loss_t
+            loss_cont = Loss.content_loss(cont,gen)
+            loss_style = Loss.style_loss(sty,gen)
+            a += loss_cont
+            b += loss_style
+            loss_t += alpha*loss_cont + beta*loss_style
+        return loss_t,a,b
\ No newline at end of file
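The reworked loop accumulates the weighted total while also returning the raw content and style sums (a, b) for logging. The content_loss and style_loss helpers live outside this hunk; a minimal sketch of what they plausibly compute, assuming the usual MSE-feature and Gram-matrix losses from Gatys et al. (arXiv:1508.06576), not the repository's actual code:

import torch

def content_loss(cont, gen):
    # Squared-error distance between real and generated feature maps.
    return torch.mean((gen - cont) ** 2)

def gram_matrix(fmap):
    # Channel-channel correlations of a (C,H,W) or (1,C,H,W) feature map.
    c, h, w = fmap.shape[-3:]
    flat = fmap.reshape(c, h * w)
    return (flat @ flat.t()) / (c * h * w)

def style_loss(sty, gen):
    # Squared-error distance between the two Gram matrices.
    return torch.mean((gram_matrix(gen) - gram_matrix(sty)) ** 2)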
@@ -17,9 +17,9 @@ class Optimizer:
         '''
         LOGGER.info("Running gradient descent with the following parameters")
         epoch = 5000
-        learning_rate = 0.001
-        alpha = 10
-        beta = 100
+        learning_rate = 0.002
+        alpha = 1
+        beta = 0.01
         LOGGER.info(f"{epoch},{learning_rate},{alpha},{beta}")
         optimizer = optim.Adam([content_img_clone],lr=learning_rate)
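With Adam optimizing the image itself, content_img_clone must be a leaf tensor with gradients enabled; the cloning happens outside this hunk, but a minimal sketch of the assumed setup:

import torch
from torch import optim

# The "parameter" being optimized is the generated image, not network weights.
content_img = torch.rand(1, 3, 224, 224)  # stand-in for the loaded content image
content_img_clone = content_img.clone().detach().requires_grad_(True)  # leaf tensor
optimizer = optim.Adam([content_img_clone], lr=0.002)

Note that alpha and beta weight the content and style terms in Loss.total_loss, so dropping beta from 100 to 0.01 sharply weakens the style term relative to content.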
@@ -33,7 +33,7 @@ class Optimizer:
         style_fmaps = feature_maps.get_fmaps(style_img)
         content_generated_fmaps = feature_maps.get_fmaps(content_img_clone)
-        total_loss = Loss.total_loss(alpha, beta, content_fmaps, style_fmaps, content_generated_fmaps)
+        total_loss,total_cont_loss,total_style_loss = Loss.total_loss(alpha, beta, content_fmaps, style_fmaps, content_generated_fmaps)
         # clears x.grad for every parameter x in the optimizer.
         # It’s important to call this before total_loss.backward(), otherwise it will accumulate the gradients from multiple passes.
@@ -46,6 +46,6 @@ class Optimizer:
         optimizer.step()
         #plt.clf()
         #plt.plot(content_img_clone)
-        if(e%10):
-            LOGGER.info(f"Epoch = {e} Total Loss = {total_loss}")
+        if(e%10 == 0):
+            LOGGER.info(f"Epoch = {e} Total Loss = {total_loss} Content Loss = {total_cont_loss} Style Loss = {total_style_loss}")
         save_image(content_img_clone,"styled.png")
\ No newline at end of file
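Taken together, these hunks assume an update loop of the following shape. This is a sketch reusing the names from the diff (feature_maps, Loss, LOGGER, and the images are stand-ins, not imports from the actual repository):

for e in range(epoch):
    # Re-extract features of the current generated image each iteration.
    content_generated_fmaps = feature_maps.get_fmaps(content_img_clone)
    total_loss, total_cont_loss, total_style_loss = Loss.total_loss(
        alpha, beta, content_fmaps, style_fmaps, content_generated_fmaps)
    optimizer.zero_grad()   # clear accumulated gradients before backprop
    total_loss.backward()   # gradients flow back into content_img_clone
    optimizer.step()        # nudge the generated image
    if e % 10 == 0:         # log every 10th epoch (the corrected condition)
        LOGGER.info(f"Epoch = {e} Total Loss = {total_loss}")

The original condition if(e%10): fired on every epoch except multiples of 10; the == 0 fix inverts it so logging happens only every 10th epoch. The log labels were also swapped relative to the return order of Loss.total_loss (loss_t, content sum, style sum); the hunk above shows them corrected.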
@@ -35,9 +35,10 @@ class Preprocessor:
         @params
         img: 3d numpy array
         '''
-        loader = transforms.Compose([transforms.ToTensor(),transforms.Resize([512,512]),transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225],),])
+        #loader = transforms.Compose([transforms.ToTensor(),transforms.Resize([224,224]),transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225],),])
+        loader = transforms.Compose([transforms.ToTensor(),transforms.Resize([224,224])])
         img = loader(img).unsqueeze(0)
-        assert img.shape == (1,3,512,512)
+        assert img.shape == (1,3,224,224)
         return img.to(device,torch.float)
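A sketch of how this preprocessing path behaves end to end, assuming an RGB numpy input (the surrounding class and device setup are not shown in the diff):

import numpy as np
import torch
from torchvision import transforms

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

img = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)  # stand-in image
loader = transforms.Compose([transforms.ToTensor(), transforms.Resize([224, 224])])
img = loader(img).unsqueeze(0)          # (1, 3, 224, 224), values scaled to [0, 1]
assert img.shape == (1, 3, 224, 224)
img = img.to(device, torch.float)

Commenting out transforms.Normalize drops the ImageNet normalization, so the feature extractor now sees raw [0, 1] pixels; that changes the loss scale, which is presumably why alpha and beta were retuned in the same commit.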
1508.06576/styled.png: binary image replaced (187 KB → 104 KB)