Datasets · Modalities: Text · Formats: json · Languages: English · Libraries: Datasets, Dask · File size: 3,039 bytes · Commit: bcd6255
import os
import pydicom
import pandas as pd
import dicom2nifti
from tqdm import tqdm
import concurrent.futures

def get_knee_side(dicom_path):
    """
    Reads DICOM metadata and extracts the knee side from SeriesDescription.
    Returns:
        str: 'RIGHT' if the SeriesDescription is "SAG_3D_DESS_RIGHT",
        'LEFT' for any other description, or None if the file cannot be read.
    """
    try:
        ds = pydicom.dcmread(dicom_path, stop_before_pixels=True)
        series_desc = getattr(ds, "SeriesDescription", "").upper()
        return 'RIGHT' if series_desc == "SAG_3D_DESS_RIGHT" else 'LEFT'
    except Exception as e:
        print(f"Error reading DICOM file {dicom_path}: {e}")
        return None
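
# Hypothetical usage sketch (the path below is an assumption for illustration):
#   side = get_knee_side('/data/raw/slice_0001.dcm')  # -> 'RIGHT', 'LEFT', or None
# A description of exactly "SAG_3D_DESS_RIGHT" yields 'RIGHT'; any other
# readable description, including an empty one, falls through to 'LEFT'.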

def convert_dcm_to_nifti(dicom_path, save_image_filepath):
    """
    Converts a DICOM series to NIfTI format.
    Args:
        dicom_path (str): Path to the directory holding the DICOM series.
        save_image_filepath (str): Path to save the NIfTI file.
    """
    dicom2nifti.dicom_series_to_nifti(dicom_path, save_image_filepath, reorient_nifti=True)
    print(f"Saved {save_image_filepath}")
    return save_image_filepath
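
# Note: dicom2nifti.dicom_series_to_nifti reads every slice in the given
# directory, so `dicom_path` must be a folder containing a single series,
# not an individual .dcm file.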

def process_row(row, save_image_folder):
    """
    Converts the DICOM series referenced by one DataFrame row to NIfTI and
    returns a copy of the row with the output path added.
    """
    patient_id = row['patient_id']
    time = row['time']
    study_folder = row['study_folder']
    series_folder = row['series_folder']
    knee_side = row['knee_side']
    path = row['path']
    
    save_image_filepath = os.path.join(save_image_folder, str(patient_id), str(time), 
                                       f'{study_folder}_{series_folder}_{knee_side}.nii.gz')
    
    if os.path.exists(save_image_filepath):
        print(f"Skipping {save_image_filepath} because it already exists")
    else:
        os.makedirs(os.path.dirname(save_image_filepath), exist_ok=True)
        convert_dcm_to_nifti(path, save_image_filepath)
    
    row_copy = row.copy()
    row_copy['save_image_filepath'] = save_image_filepath
    return row_copy
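
# Assumed input schema (inferred from the column accesses above; the example
# values are hypothetical, not taken from the dataset):
#   patient_id,time,study_folder,series_folder,knee_side,path
#   12345,00m,1.C.2,SAG_3D_DESS,RIGHT,/data/raw/12345/00m/1.C.2/SAG_3D_DESS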

if __name__ == "__main__":
    save_image_folder = '/data/images'
    df = pd.read_csv('/data/all_studies.csv')

    # Define number of workers (threads)
    max_workers = 8  # You can adjust this based on your CPU cores
    save_rows = []
    # Process in batches if memory is a concern
    batch_size = 100
    for i in range(0, len(df), batch_size):
        batch_df = df.iloc[i:i+batch_size]

        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = [executor.submit(process_row, row, save_image_folder)
                       for _, row in batch_df.iterrows()]

            for future in tqdm(concurrent.futures.as_completed(futures),
                               total=len(futures),
                               desc=f"Batch {i//batch_size+1}/{(len(df)+batch_size-1)//batch_size}"):
                try:
                    result = future.result()
                    save_rows.append(result)
                except Exception as e:
                    print(f"Processing error: {e}")

    # Persist the collected rows so the output paths are not lost
    # (the output filename here is an arbitrary choice).
    pd.DataFrame(save_rows).to_csv('/data/all_studies_with_nifti.csv', index=False)