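"""Classify C4 documents with a "tasky" vs. non-tasky sequence classifier.

For each C4 train shard in [--start, --end], runs batched inference with
the model given by --model_name and writes, per shard, a JSONL file of
class-1 ("tasky") probabilities for every document plus a JSONL file
containing only the documents predicted as tasky.

Example (the script filename is illustrative):
    python c4_tasky_inference.py --start 0 --end 3 --batch_size 32
"""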
import argparse
import gc
import json
import os

import datasets
import pandas as pd
import torch
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
|
# The C4 "en" train split is sharded into 1024 files on the Hugging Face Hub.
TOTAL_NUM_FILES_C4_TRAIN = 1024
|
|
|
|
def parse_args():
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--start",
        type=int,
        required=True,
        help="First file number to process. Valid values: 0 - 1023",
    )
    parser.add_argument(
        "--end",
        type=int,
        required=True,
        help="Last file number to process (inclusive). Valid values: 0 - 1023",
    )
    parser.add_argument("--batch_size", type=int, default=16, help="Batch size")
    parser.add_argument(
        "--model_name",
        type=str,
        default="taskydata/deberta-v3-base_10xp3nirstbbflanseuni_10xc4",
        help="Model name",
    )
    parser.add_argument(
        "--local_cache_location",
        type=str,
        default="c4_download",
        help="Local cache location from which the dataset will be loaded",
    )
    # Note: argparse's type=bool treats any non-empty string (including
    # "False") as True, so the boolean flags use BooleanOptionalAction
    # (Python 3.9+), e.g. --clear_dataset_cache / --no-clear_dataset_cache.
    parser.add_argument(
        "--use_local_cache_location",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Load the dataset from the local cache instead of the Hub.",
    )
    parser.add_argument(
        "--clear_dataset_cache",
        action=argparse.BooleanOptionalAction,
        default=False,
        help="Delete the dataset files from the cache after inference.",
    )
    parser.add_argument(
        "--release_memory",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Release the memory of used variables after each file.",
    )

    args = parser.parse_args()
    return args
|
|
|
|
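# Yield successive n-sized chunks of list l.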
def chunks(l, n):
    for i in range(0, len(l), n):
        yield l[i : i + n]
|
|
|
|
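# Tokenize data in batches. Relies on the module-level `tokenizer` created
# in the __main__ block below.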
def batch_tokenize(data, batch_size):
    batches = list(chunks(data, batch_size))
    tokenized_batches = []
    for batch in batches:
        # Pad/truncate every document to a fixed length of 512 tokens.
        tensor = tokenizer(
            batch,
            return_tensors="pt",
            padding="max_length",
            truncation=True,
            max_length=512,
        )
        tokenized_batches.append(tensor)
    return tokenized_batches, batches
|
|
|
|
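# Run the classifier over all batches and return one logits tensor per
# document. Relies on the module-level `model` and `device`.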
def batch_inference(data, batch_size=16):
    preds = []
    tokenized_batches, batches = batch_tokenize(data, batch_size)
    for i in tqdm(range(len(batches))):
        with torch.no_grad():
            logits = model(**tokenized_batches[i].to(device)).logits.cpu()
            # One logits row per document in the batch.
            preds.extend(logits)
    return preds
|
|
|
|
if __name__ == "__main__":
    args = parse_args()

    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model.to(device)
    model.eval()  # inference only: disable dropout
|
|
|
|
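    # --start/--end select a range of C4 train shards; process each one in
    # turn (--end is treated as inclusive, matching the 0 - 1023 help text).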
    for global_id in range(args.start, args.end + 1):
        if args.use_local_cache_location:
            # Assumes locally downloaded shards keep their original names.
            file_name = f"c4-train.{global_id:05d}-of-{TOTAL_NUM_FILES_C4_TRAIN:05d}.json.gz"
            data_files = {"train": f"{args.local_cache_location}/{file_name}"}
            c4 = datasets.load_dataset("json", data_files=data_files, split="train")
        else:
            # Shard files on the Hub are named like en/c4-train.00000-of-01024.json.gz.
            file_name = f"en/c4-train.{global_id:05d}-of-{TOTAL_NUM_FILES_C4_TRAIN:05d}.json.gz"
            data_files = {"train": file_name}
            c4 = datasets.load_dataset(
                "allenai/c4", data_files=data_files, split="train"
            )
        df = pd.DataFrame(c4, index=None)
        texts = df["text"].to_list()
        preds = batch_inference(texts, batch_size=args.batch_size)
|
|
        assert len(preds) == len(texts)
|
|
        # Timestamps are not JSON-serializable by default, so stringify them.
        df["timestamp"] = df["timestamp"].astype(str)

        # Output locations use an assumed naming convention: one probabilities
        # file and one filtered "tasky" file per shard.
        os.makedirs("c4_tasky_output", exist_ok=True)
        c4taskyprobas_path = f"c4_tasky_output/probas_{global_id:05d}.jsonl"
        c4tasky_path = f"c4_tasky_output/tasky_{global_id:05d}.jsonl"

        with open(c4taskyprobas_path, "w") as f, open(c4tasky_path, "w") as g:
            for i in range(len(preds)):
                predicted_class_id = preds[i].argmax().item()
                # Probability of the last (tasky) class.
                tasky_proba = torch.softmax(preds[i], dim=-1)[-1].item()
                f.write(json.dumps({"proba": tasky_proba}) + "\n")

                # Keep the full record only for documents predicted as tasky.
                if int(predicted_class_id) == 1:
                    g.write(
                        json.dumps(
                            {
                                "proba": tasky_proba,
                                "text": texts[i],
                                "timestamp": df["timestamp"][i],
                                "url": df["url"][i],
                            }
                        )
                        + "\n"
                    )

        if args.release_memory:
            # Free per-shard objects before the next iteration.
            del preds
            del texts
            del df
            gc.collect()
|
|
        # Only meaningful when the shard was loaded from the local cache.
        if args.clear_dataset_cache and args.use_local_cache_location:
            os.remove(f"{args.local_cache_location}/{file_name}")