import gensim
import pandas as pd
import text_preprocessing
from collections import defaultdict
from gensim.test.utils import get_tmpfile
from export_jobs_resumes_csv import get_jobs_resumes_csv
from clean_dataframe import clean_jobs_resume_csv


# Export the jobs and resumes CSVs
try:
    get_jobs_resumes_csv()
except Exception as e:
    print("Failed to export jobs/resumes CSVs:", e)


# Clean the jobs and resumes CSVs
try:
    clean_jobs_resume_csv()
except Exception as e:
    print("Failed to clean jobs/resumes CSVs:", e)


# Read the cleaned jobs file (relative path, matching how clean_resumes.csv is read below)
j_df = pd.read_csv("clean_jobs.csv")

print(j_df.head())
print(j_df.describe())


# Check the number of unique job ids
print(len(j_df['id'].unique()))



# Tokenize each job description, keeping [job id, tokens] pairs
jobs_tokens = []
for i in range(len(j_df)):
    raw = str(j_df.iloc[i]['details'])
    clean_tokens = text_preprocessing.preprocess_text(raw)
    jobs_tokens.append([j_df.iloc[i]['id'], clean_tokens])

#print(jobs_tokens)

print(len(jobs_tokens))


# Keep only the token lists (drop the job ids)
jobs_token_list = [tokens for _, tokens in jobs_tokens]

print(len(jobs_token_list))



# Remove tokens that appear only once across the jobs corpus
frequency = defaultdict(int)
for row in jobs_token_list:
    for token in row:
        frequency[token] += 1

jobs_token_list = [
    [token for token in row if frequency[token] > 1]
    for row in jobs_token_list
]
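
# Quick sanity check (an illustrative addition, not part of the pipeline):
# how large is the jobs vocabulary after dropping singleton tokens?
# Doc2Vec quality depends on a reasonably sized vocabulary, so this is
# worth eyeballing before training.
jobs_vocab = {token for row in jobs_token_list for token in row}
print(len(jobs_vocab))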





# Create the tagged documents needed for Doc2Vec training
def create_train_tagged_document(list_of_list_of_words, tokens_only=False):
    for i, list_of_words in enumerate(list_of_list_of_words):
        if tokens_only:
            yield list_of_words
        else:
            # For training data, tag each document with its job id
            yield gensim.models.doc2vec.TaggedDocument(list_of_words, [str(jobs_tokens[i][0])])


train_data = list(create_train_tagged_document(jobs_token_list))

#print(train_data)
print(len(train_data))
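
# With the tagged documents ready, a Doc2Vec model can be trained on the
# jobs corpus. This is a minimal sketch: vector_size, min_count, and
# epochs are illustrative starting points, not tuned values.
model = gensim.models.doc2vec.Doc2Vec(vector_size=50, min_count=2, epochs=40)
model.build_vocab(train_data)
model.train(train_data, total_examples=model.corpus_count, epochs=model.epochs)

# Optionally persist the model; get_tmpfile only gives a throwaway path,
# so swap in a permanent location if the model should outlive this run.
model_path = get_tmpfile("jobs_doc2vec.model")
model.save(model_path)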




# Read the cleaned resumes file
r_df = pd.read_csv("clean_resumes.csv")

# Check the number of unique resume and person ids
print(len(r_df['resume_id'].unique()))
print(len(r_df['person_id'].unique()))



# Tokenize each resume, keeping [resume id, tokens] pairs
resumes_tokens = []
for i in range(len(r_df)):
    raw = str(r_df.iloc[i]['resume_details'])
    clean_tokens = text_preprocessing.preprocess_text(raw)
    resumes_tokens.append([r_df.iloc[i]['resume_id'], clean_tokens])
#print(resumes_tokens)


# Keep only the token lists (drop the resume ids)
resumes_token_list = [tokens for _, tokens in resumes_tokens]


# Remove tokens that appear only once across the resumes corpus
frequency = defaultdict(int)
for row in resumes_token_list:
    for token in row:
        frequency[token] += 1

resumes_token_list = [
    [token for token in row if frequency[token] > 1]
    for row in resumes_token_list
]


# Create the tagged documents for the resumes (same structure as for the jobs)
def create_test_tagged_document(list_of_list_of_words, tokens_only=False):
    for i, list_of_words in enumerate(list_of_list_of_words):
        if tokens_only:
            yield list_of_words
        else:
            # Tag each document with its resume id
            yield gensim.models.doc2vec.TaggedDocument(list_of_words, [str(resumes_tokens[i][0])])


test_data = list(create_test_tagged_document(resumes_token_list))
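
# Finally, a sketch of the matching step itself: infer a vector for each
# resume and rank jobs by similarity against the trained document vectors.
# Assumes the Doc2Vec model trained above and gensim 4.x (per-tag vectors
# live under model.dv); topn=5 is an arbitrary cutoff.
for doc in test_data:
    inferred = model.infer_vector(doc.words)
    top_jobs = model.dv.most_similar([inferred], topn=5)
    print(doc.tags[0], top_jobs)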
