# dataset.py
import numpy as np
import random
from utils import *

# Save the pair data and labels to an .npz archive (compressed by default).
def save_dataset(path, pairData, pairLabels, compressed=True):
    if compressed:
        np.savez_compressed(path, pairData=pairData, pairLabels=pairLabels)
    else:
        np.savez(path, pairData=pairData, pairLabels=pairLabels)

# Load the pair data and labels back from an .npz archive written by save_dataset.
def load_dataset(path):
    data = np.load(path, allow_pickle=True)
    return (data['pairData'], data['pairLabels'])
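
# A minimal round-trip sketch (the path 'pairs.npz' is hypothetical):
#   save_dataset('pairs.npz', pairData, pairLabels)
#   pairData, pairLabels = load_dataset('pairs.npz')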

# Build positive/negative pairs from the document vectors stored at path.
# (Couldn't build the dataset for the shuffled/merged/obfuscated variants, as memory ran out.)
def make_dataset_small(path):
    vecs = np.load(path, allow_pickle=True)['vecs']

    pairData = []
    pairLabels = []  # 1 = plagiarism (positive pair), 0 = unrelated (negative pair)

    for i in range(len(vecs)):
        currentData = vecs[i]

        # positive pair: each document vector paired with itself
        pairData.append([currentData, currentData])
        pairLabels.append([1])

        # negative pair: pair with a randomly chosen different document vector
        j = i
        while j == i:
            j = random.randint(0, len(vecs) - 1)

        pairData.append([currentData, vecs[j]])
        pairLabels.append([0])

    return (np.array(pairData), np.array(pairLabels))
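
# Assuming N fixed-length document vectors of dimension D in the input archive,
# the returned pairData has shape (2*N, 2, D) and pairLabels has shape (2*N, 1),
# with positive and negative pairs interleaved.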

# Load the vocabulary size and embedding matrix from an .npz archive.
def load_embedding(path):
    data = np.load(path, allow_pickle=True)
    return (data['vocab_size'], data['embedding_matrix'])
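
# A minimal end-to-end sketch, assuming hypothetical archive names ('vecs.npz',
# 'pairs.npz', 'embedding.npz') produced elsewhere in the pipeline:
if __name__ == "__main__":
    pairData, pairLabels = make_dataset_small("vecs.npz")
    save_dataset("pairs.npz", pairData, pairLabels)
    pairData, pairLabels = load_dataset("pairs.npz")
    vocab_size, embedding_matrix = load_embedding("embedding.npz")
    print(pairData.shape, pairLabels.shape, vocab_size, embedding_matrix.shape)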