import wandb
import random

# start a new wandb run to track this script
wandb.init(
    # set the wandb project where this run will be logged
    project="my-jean-zay-training",
    
    # set experiment name
    name="Llama-training",
    # Adding your SLURM job ID to the experiment name is good practice,
    # since it ties the W&B run back to the cluster job that produced it:
    # name="my-jean-zay-training"+str(os.environ["SLURM_JOBID"])
    
    # track hyperparameters and run metadata (stored in the run's config)
    config={
    "learning_rate": 0.02,
    "architecture": "LLama5",
    "dataset": "CommonCorpus",
    "epochs": 10,
    }
    
    # You can set W&B offline mode inside your training script!
    # (useful on compute nodes without internet access; sync later)
    #mode="offline",
)

# Simulate a training run: accuracy rises and loss decays toward a noisy
# asymptote as the epoch index grows, then log each step to W&B.
epochs = 10
offset = random.random() / 5

for epoch in range(2, epochs):
    # shared exponential-decay term for this epoch
    decay = 2 ** -epoch
    # note: one random.random() draw for acc, then one for loss,
    # in that order (keeps the stream identical under a fixed seed)
    acc = 1 - decay - random.random() / epoch - offset
    loss = decay + random.random() / epoch + offset

    # send the metrics for this step to the wandb run
    wandb.log({"acc": acc, "loss": loss})

# Mark the run as finished so wandb flushes and closes cleanly
wandb.finish()