| import subprocess |
| from datasets import load_dataset, Dataset |
| import json |
| from tqdm import tqdm |
|
|
# TypeScript subset of The Stack (smol); used below via its 'train' split.
ds = load_dataset("bigcode/the-stack-smol", data_dir='data/typescript')
|
|
def split_ts_into_chunks(ts_code):
    """Split TypeScript source into semantic chunks via the Node helper.

    Runs ``parse_ts.js`` with *ts_code* on stdin; the helper is expected to
    write one JSON object per chunk to ``semantic_chunks.jsonl`` (side-channel
    file, not stdout).  The file is then read back, parsed, and truncated so
    the next call starts from an empty file.

    Args:
        ts_code: TypeScript source text to chunk.

    Returns:
        list[dict]: the parsed chunk objects for this file.

    Raises:
        RuntimeError: if the Node process exits non-zero (stderr included).
    """
    result = subprocess.run(
        ['node', 'parse_ts.js'],
        input=ts_code,
        text=True,
        capture_output=True,  # capture stderr so failures are diagnosable
    )

    if result.returncode != 0:
        # Include the parser's stderr instead of raising an opaque error.
        raise RuntimeError(f'Error in TypeScript parsing: {result.stderr}')

    with open('semantic_chunks.jsonl', 'r') as file:
        # Skip blank lines so a trailing newline doesn't crash json.loads.
        chunks = [json.loads(line) for line in file if line.strip()]

    # Truncate so this file's chunks are not re-read on the next call.
    with open('semantic_chunks.jsonl', 'w'):
        pass

    return chunks
|
|
|
|
def chunk_ts_file(data):
    """Chunk one dataset row's TypeScript content and tag provenance.

    Args:
        data: a dataset row with keys 'content', 'repository_name',
            'path', and 'lang'.

    Returns:
        list[dict]: the chunks from ``split_ts_into_chunks``, each
        annotated in place with 'repo', 'path', and 'language'.
    """
    funcs = split_ts_into_chunks(data['content'])
    # Annotate each chunk dict directly; no index arithmetic needed.
    for func in funcs:
        func['repo'] = data['repository_name']
        func['path'] = data['path']
        func['language'] = data['lang']
    return funcs
|
|
# Chunk every TypeScript file in the dataset, then persist as one dataset.
chunks = []
train = ds['train']
for idx in tqdm(range(len(train))):
    chunks.extend(chunk_ts_file(train[idx]))
    if idx % 100 == 0:
        # Periodic running total alongside the tqdm bar.
        print(len(chunks))

dataset = Dataset.from_list(chunks)
print(dataset)
dataset.to_json('ts-chunks.json')
|
|