I am training a CNN for an audio classification task, using TensorFlow 2.0 RC with a custom training loop (as explained in this guide on their official site). I would find it really handy to have a nice progress bar, similar to the one shown by the usual Keras model.fit.
Here is an outline of my training code (I am using 4 GPUs with a mirrored distribution strategy):
strategy = tf.distribute.MirroredStrategy()
distr_train_dataset = strategy.experimental_distribute_dataset(train_dataset)
if valid_dataset:
    distr_valid_dataset = strategy.experimental_distribute_dataset(valid_dataset)
with strategy.scope():
    model = build_model() # build the model
    optimizer = # define optimizer
    train_loss = # define training loss (per-example)
    train_mean_loss = # tf.keras.metrics.Mean tracking the running loss
    train_metrics_1 = # AUC-ROC
    train_metrics_2 = # AUC-PR
    val_metrics_1 = # AUC-ROC for validation
    val_metrics_2 = # AUC-PR for validation
    val_loss = # tf.keras.metrics.Mean tracking the validation loss
    # rescale loss
    def compute_loss(labels, predictions):
        per_example_loss = train_loss(labels, predictions)
        return per_example_loss/config.batch_size
    def train_step(batch):
        audio_batch, label_batch = batch
        with tf.GradientTape() as tape:
            logits = model(audio_batch, training=True)
            loss = compute_loss(label_batch, logits)
        variables = model.trainable_variables
        grads = tape.gradient(loss, variables)
        optimizer.apply_gradients(zip(grads, variables))
        train_metrics_1.update_state(label_batch, logits)
        train_metrics_2.update_state(label_batch, logits)
        train_mean_loss.update_state(loss)
        return loss
    def valid_step(batch):
        audio_batch, label_batch = batch
        logits = model(audio_batch, training=False)
        loss = compute_loss(label_batch, logits)
        val_metrics_1.update_state(label_batch, logits)
        val_metrics_2.update_state(label_batch, logits)
        val_loss.update_state(loss)
        return loss
    @tf.function
    def distributed_train(dataset):
        num_batches = 0
        for batch in dataset:
            num_batches += 1
            strategy.experimental_run_v2(train_step, args=(batch,))
            # print progress here
            tf.print('Step', num_batches, '; Loss', train_mean_loss.result(), '; ROC_AUC', train_metrics_1.result(), '; PR_AUC', train_metrics_2.result())
            gc.collect()
    @tf.function
    def distributed_valid(dataset):
        for batch in dataset:
            strategy.experimental_run_v2(valid_step, args=(batch,))
            gc.collect()
for epoch in range(epochs):
    distributed_train(distr_train_dataset)
    gc.collect()
    train_metrics_1.reset_states()
    train_metrics_2.reset_states()
    train_mean_loss.reset_states()
    if valid_dataset:
        distributed_valid(distr_valid_dataset)
        gc.collect()
        val_metrics_1.reset_states()
        val_metrics_2.reset_states()
        val_loss.reset_states()
Here train_dataset and valid_dataset are two tf.data.TFRecordDataset objects generated with the usual tf.data input pipeline.
TensorFlow provides a really nice tf.keras.utils.Progbar (which is indeed what you see when you train using model.fit). I have taken a look at its source code, and it relies on numpy, so I can't use it in place of the tf.print() statement (which is executed in graph mode).
How can I implement a similar progress bar in my custom training loop (with my training function running in graph mode)?
How does model.fit display a progress bar in the first place?
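For context, the closest workaround I can think of is to bridge back to eager execution with tf.py_function, which lets the numpy-based Progbar run as an op inside the graph. A rough sketch (untested; it assumes the number of steps per epoch, steps_per_epoch, is known in advance, e.g. from the dataset cardinality):
from tensorflow.keras.utils import Progbar

progbar = Progbar(steps_per_epoch)  # steps_per_epoch assumed known

def update_progbar(step, loss):
    # Executed eagerly via tf.py_function, so numpy-based Progbar is fine here
    progbar.update(int(step), values=[('loss', float(loss))])

@tf.function
def distributed_train(dataset):
    step = tf.constant(0)
    for batch in dataset:
        strategy.experimental_run_v2(train_step, args=(batch,))
        step += 1
        # Hop out of graph mode for the side effect only
        tf.py_function(update_progbar, inp=[step, train_mean_loss.result()], Tout=[])
I am not sure about the performance cost of calling tf.py_function on every step, though.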
A progress bar for the custom training loop can be generated using the following code:
from tensorflow.keras.utils import Progbar
import time 
import numpy as np
metrics_names = ['acc','pr'] 
num_epochs = 5
num_training_samples = 100
batch_size = 10
for i in range(num_epochs):
    print("\nepoch {}/{}".format(i+1,num_epochs))
    
    pb_i = Progbar(num_training_samples, stateful_metrics=metrics_names)
    
    for j in range(num_training_samples//batch_size):
        
        time.sleep(0.3)
        
        values=[('acc',np.random.random(1)), ('pr',np.random.random(1))]
        
        pb_i.add(batch_size, values=values)
Output :
epoch 1/5
100/100 [==============================] - 3s 30ms/step - acc: 0.2169 - pr: 0.9011
epoch 2/5
100/100 [==============================] - 3s 30ms/step - acc: 0.7815 - pr: 0.4900
epoch 3/5
100/100 [==============================] - 3s 30ms/step - acc: 0.8003 - pr: 0.9292
epoch 4/5
100/100 [==============================] - 3s 30ms/step - acc: 0.8280 - pr: 0.9113
epoch 5/5
100/100 [==============================] - 3s 30ms/step - acc: 0.8497 - pr: 0.1929
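Note that Progbar.add(n, values) advances the bar by n steps, while Progbar.update(current, values) sets its absolute position, so you can use whichever fits your loop. A quick illustration:
from tensorflow.keras.utils import Progbar

pb = Progbar(100)
pb.add(10)     # bar advances by 10 -> 10/100
pb.update(50)  # bar jumps to the absolute position 50/100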
@Shubham Malaviya's answer is perfect.
I just want to extend it further for the case of interacting with a tf.data.Dataset. This code is also based on this answer.
import tensorflow as tf
import numpy as np
import time 
# From https://www.tensorflow.org/guide/data#reading_input_data
(images_train, labels_train), (images_test, labels_test) = tf.keras.datasets.fashion_mnist.load_data()
images_train = images_train/255
images_test = images_test/255
dataset_train = tf.data.Dataset.from_tensor_slices((images_train, labels_train))
dataset_test = tf.data.Dataset.from_tensor_slices((images_test, labels_test))
# From @Shubham Malaviya https://stackoverflow.com/a/60094207/8682939
metrics_names = ['train_loss','val_loss'] 
num_epochs = 2
num_training_samples = images_train.shape[0]
batch_size = 10
# Loop on each epoch
for epoch in range(num_epochs):
  print("\nepoch {}/{}".format(epoch+1,num_epochs))
  progBar = tf.keras.utils.Progbar(num_training_samples, stateful_metrics=metrics_names)
  # Loop on each batch of train dataset
  for idX, (batch_x, batch_y) in enumerate(dataset_train.batch(batch_size)):
    # Train the model
    train_loss = np.random.random(1)
    values=[('train_loss',train_loss)]
    # (idX + 1) * batch_size = number of samples processed so far
    progBar.update((idX+1)*batch_size, values=values)
  # Loop on each batch of test dataset for validation
  for batch_x, batch_y in dataset_test.batch(batch_size):
    # Foward image through the network
    # -----
    # Calc the loss
    val_loss = np.random.random(1)
  # Update progBar with val_loss
  values=[('train_loss',train_loss),('val_loss',val_loss)]
  progBar.update(num_training_samples, values=values, finalize=True)
Output:
epoch 1/2
60000/60000 [==============================] - 1s 22us/step - train_loss: 0.7019 - val_loss: 0.0658

epoch 2/2
60000/60000 [==============================] - 1s 21us/step - train_loss: 0.5561 - val_loss: 0.0324
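As for how model.fit shows its bar: it drives each epoch from an eager Python loop and only compiles the per-batch step, updating the bar (through the ProgbarLogger callback) between step calls. The distributed loop from the question can be restructured the same way; a sketch reusing the question's names, again assuming steps_per_epoch is known in advance:
from tensorflow.keras.utils import Progbar

@tf.function
def distributed_train_step(batch):
    # Only the per-batch work runs in graph mode
    strategy.experimental_run_v2(train_step, args=(batch,))

for epoch in range(epochs):
    print("\nepoch {}/{}".format(epoch + 1, epochs))
    progbar = Progbar(steps_per_epoch, stateful_metrics=['loss', 'roc_auc', 'pr_auc'])
    for step, batch in enumerate(distr_train_dataset):
        distributed_train_step(batch)
        # Eager context here, so .numpy() and the numpy-based Progbar both work
        progbar.update(step + 1, values=[
            ('loss', train_mean_loss.result().numpy()),
            ('roc_auc', train_metrics_1.result().numpy()),
            ('pr_auc', train_metrics_2.result().numpy()),
        ])
    train_metrics_1.reset_states()
    train_metrics_2.reset_states()
    train_mean_loss.reset_states()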