diff --git a/Yolo_train.py b/Yolo_train.py
index c15ee01be0fe75ac404fdb1cae152954170dac42..0915595b34118dea326d6dbd52e14a5d25b02b05 100644
--- a/Yolo_train.py
+++ b/Yolo_train.py
@@ -1,10 +1,7 @@
 import os, sys
 import torch
-from random import random
 from datetime import datetime
-from PIL import Image
 import json
-
 from torchvision import transforms
 import torch.optim as optim
 from torch.utils.data import DataLoader
@@ -15,14 +12,6 @@ from Yolo_loss import YoloLoss
 from Yolo_v1_fcs import Yolo_v1_fcs
 import utils
 
-# if directories for saved models does not exist -> make dirs 
-# dir structure -> <year>/<month>/<day>/<save_file>
-model_save_folder_name = fr"saved_models\{datetime.strftime(datetime.now(), '%Y_%m_%d_%H_%M')}"
-if (not os.path.isdir("saved_models")):
-    os.mkdir("saved_models")
-if (not os.path.isdir(model_save_folder_name)):
-    os.mkdir(model_save_folder_name)
-
 transform = transforms.Compose([
 #   transforms.Resize(256),
   transforms.Resize((224, 224)),
@@ -32,8 +21,11 @@ transform = transforms.Compose([
 ])
 
 
-def train_one_epoch(yolo_network, train_loader):
-    running_loss = []
+def train_one_epoch(yolo_network, train_loader, print_status_every=200):
+    """
+    Returns the average training loss over the epoch.
+    """
+    running_loss = 0.
     
     # get the inputs; data is a list of [inputs, labels]
     for i, data in enumerate(train_loader):
@@ -51,28 +43,44 @@ def train_one_epoch(yolo_network, train_loader):
         optimizer.step()
 
         # print statistics
-        running_loss.append(loss.item())
+        running_loss += loss.item()
 
-        if i % 200 == 199: # print every 200 mini-batches
-            print(f'[{epoch + 1}, {i + 1:5d}] loss: {sum(running_loss) / 200:.3f}')
-            running_loss = []
-    
+        if i % print_status_every == 0 and not i == 0:
+            print(f'[{epoch + 1}, {i + 1:5d}] avg. loss: {running_loss/(i+1):.3f}', end="\r")
+
+    avg_running_loss = running_loss / (i+1)
+    print(f'{" "*20}\rEpoch {epoch + 1}: avg. loss: {avg_running_loss:.3f}')
+    return avg_running_loss
 
 if __name__ == '__main__':
     
-    EPOCHS = 10
-    BATCH_SIZE = 32
+    EPOCHS = 500
+    BATCH_SIZE = 1
+    SAVE_MODELS = False
+    
+    if SAVE_MODELS:
+        # if the directory for saved models does not exist -> make dirs
+        # dir structure -> saved_models\<YYYY_MM_DD_HH_MM>\<save_file>
+        model_save_folder_name = fr"saved_models\{datetime.strftime(datetime.now(), '%Y_%m_%d_%H_%M')}"
+        if (not os.path.isdir("saved_models")):
+            os.mkdir("saved_models")
+        if (not os.path.isdir(model_save_folder_name)):
+            os.mkdir(model_save_folder_name)
     
     best_avg_val_loss = float("inf")
+    
+    training_losses = []
     validation_losses = []
 
     print("creating train dataset and loader")
-    train_dataset = CocoDataSet("./data/train2014", "./data/labels/train2014", transform, 0) 
+    train_dataset = CocoDataSet("./data/simple_train", "./data/simple_labels/train", transform, 0) 
+    # train_dataset = CocoDataSet("./data/train2014", "./data/labels/train2014", transform, 0) 
     train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,
                                             shuffle=True, num_workers=0)
 
     print("creating val dataset and loader")
-    val_dataset = CocoDataSet("./data/val2014", "./data/labels/val2014", transform, 0) 
+    val_dataset = CocoDataSet("./data/simple_val", "./data/simple_labels/val", transform, 0) 
+    # val_dataset = CocoDataSet("./data/val2014", "./data/labels/val2014", transform, 0) 
     val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE,
                                             shuffle=True, num_workers=0)
 
@@ -99,10 +107,11 @@ if __name__ == '__main__':
         print(f"Epoch {epoch+1}: ", end="")
         
         yolo_network.train(True)
-        train_one_epoch(yolo_network, train_loader)
-        
+        training_loss = train_one_epoch(yolo_network, train_loader)
+        training_losses.append(training_loss)
+
         yolo_network.train(False)
-        print("Start Validation")
+        print("Start Validation", end="\r")
         running_val_loss = 0
         for i, val_data in enumerate(val_loader):
             
@@ -119,15 +128,18 @@ if __name__ == '__main__':
         avg_val_loss = running_val_loss/(i+1)
 
         validation_losses.append(avg_val_loss)
-        print(avg_val_loss)
+        print(f"Avg validation loss: {avg_val_loss}", end="")
         
-        if (avg_val_loss < best_avg_val_loss):
+        if (SAVE_MODELS and avg_val_loss < best_avg_val_loss):
             best_avg_val_loss = avg_val_loss
             utils.save_model(yolo_network, optimizer, f"epoch_{epoch+1}", model_save_folder_name)
+        else:
+            print()
 
-    with open(fr"{model_save_folder_name}\validation_loss.json", "w") as file:
-        file.write(json.dumps(validation_losses))
+    if SAVE_MODELS:
+        with open(fr"{model_save_folder_name}\validation_loss.json", "w") as file:
+            file.write(json.dumps(validation_losses))
 
     print('Finished Training')
 
-    utils.draw_random_image(yolo_network, encoder, val_dataset, device)
\ No newline at end of file
+    utils.draw_random_image(yolo_network, encoder, train_dataset, device)
\ No newline at end of file
diff --git a/Yolo_v1_fcs.py b/Yolo_v1_fcs.py
index 80164db3286ad9e12d62a304778efef1953c919a..37ae5aae6a6478d976132b3c3ce233625d012ea5 100644
--- a/Yolo_v1_fcs.py
+++ b/Yolo_v1_fcs.py
@@ -35,4 +35,4 @@ if __name__ == '__main__':
     yolo = Yolo_v1_fcs(1280*7*7)
     thing = torch.rand((1,1280,7,7))
     print(torch.Tensor.size(thing))
-    print(yolo(thing))
\ No newline at end of file
+    print(yolo(thing))
diff --git a/cocoDataset.py b/cocoDataset.py
index 8271ac8fe958874dd3e9c097c0e4eb3d856ceed2..59fd8c5bb11f26db417b9ddeac2da0d068294e71 100644
--- a/cocoDataset.py
+++ b/cocoDataset.py
@@ -56,10 +56,7 @@ class CocoDataSet(Dataset):
         x = io.imread(self.x[index])
         if len(x.shape) < 3:
             x = gray2rgb(x)
-        y = None
 
-        # with open(self.y[index], "r") as file:
-        #     y = [line.strip(" \n").split(" ") for line in file.readlines()]        
         y = self.read_annotation_file(self.y[index])
 
         # set confidence/probability to 1 for person and 0 otherwise
diff --git a/utils.py b/utils.py
index 8ceabf07938ecdebd77d50b03c47f97ff3f00ba2..a5e3c53e471c25a4f8e8116507fe8a4f5fc88f6a 100644
--- a/utils.py
+++ b/utils.py
@@ -10,9 +10,10 @@ from random import random
 import matplotlib.pyplot as plt
 
 def draw_random_image(yolo_network, encoder, dataset, device, threshhold=0.5):
-    random_image_index = math.ceil(random() * len(dataset.x))
+    random_image_index = math.floor(random() * len(dataset.x))
     image = Image.open(dataset.x[random_image_index])
     labels = dataset.read_annotation_file(dataset.y[random_image_index])
+    training_labels = dataset[random_image_index][1]
 
     preprocessed_image, _ = dataset[random_image_index]
     preprocessed_image = preprocessed_image.unsqueeze(0).to(device)
@@ -22,6 +23,8 @@ def draw_random_image(yolo_network, encoder, dataset, device, threshhold=0.5):
 
     fig, ax = plt.subplots()
 
+    print(nn_output[0])
+
     predicted_bounding_boxes = filter_predictions(nn_output[0], threshhold=threshhold)
     predicted_bounding_boxes = generalize_prediction(predicted_bounding_boxes)
 
@@ -37,6 +40,7 @@ def draw_random_image(yolo_network, encoder, dataset, device, threshhold=0.5):
 
     print(f"{len(predicted_bounding_boxes)} good bounding boxes")
     print(predicted_bounding_boxes)
+    print(f"labels: {training_labels}")
     ax.imshow(image)
     plt.show()
 
@@ -102,7 +106,7 @@ def save_model(yolo_network, optimizer, file_name, folder=fr"saved_models\{datet
     file_name = fr"{folder}\{file_name}.pth.tar"
     torch.save(state, file_name)
 
-    print(f"Saved model as '{file_name}'", end="\n\n")
+    print(f" - Saved model as '{file_name}'", end="\n\n")
 
 def load_YoloModel_and_optimizer(YoloModel, optimizer, load_file, folder=fr"saved_models\{datetime.strftime(datetime.now(), '%Y_%m_%d')}"):
     print(f"___Loading model and optimizer states from {load_file}___")