# Standard library imports
import re
import unicodedata

# Third-party imports
import inflect
import nltk
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer, WordNetLemmatizer
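
# NOTE: the tokenizer, stopword list, and lemmatizer rely on NLTK data that must be
# downloaded once, e.g. nltk.download('punkt'), nltk.download('stopwords'),
# nltk.download('wordnet').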



def text_to_sentence_tokens(text):
    """Convert text into list of tokenized sentences"""
    sentences = nltk.sent_tokenize(text)
    return sentences


def text_to_words_tokens(text):
    """Convert text into list of tokenized words"""
    words = nltk.word_tokenize(text)
    return words


def remove_special_tags(words):
    """Strip HTML tags and markup from a list of tokenized words"""
    rules = [
        (r'>\s+', '>'),                               # remove spaces after a tag opens or closes
        (r'\s+', ' '),                                # collapse consecutive whitespace
        (r'\s*<br\s*/?>\s*', '\n'),                   # newline after a <br>
        (r'</(div)\s*>\s*', '\n'),                    # newline after </div>
        (r'</(p|h\d)\s*>\s*', '\n\n'),                # blank line after </p> and heading tags
        (r'<head>.*<\s*(/head|body)[^>]*>', ''),      # remove <head> to </head>
        (r'<a\s+href="([^"]+)"[^>]*>.*</a>', r'\1'),  # keep the link target instead of the link text
        (r'[ \t]*<[^<]*?/?>', ''),                    # remove remaining tags
        (r'^\s+', ''),                                # strip leading whitespace
    ]
    compiled_rules = [(re.compile(pattern), repl) for pattern, repl in rules]

    new_words = []
    for word in words:
        for regex, repl in compiled_rules:
            word = regex.sub(repl, word)
        new_word = word.rstrip().replace('\n', ' ')
        new_words.append(new_word)
    return new_words



def remove_non_ascii(words):
    """Remove non-ASCII characters from list of tokenized words"""
    new_words = []
    for word in words:
        new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')
        new_words.append(new_word)
    return new_words

def remove_apostrophe(words):
    """Remove apostrophes from a list of tokenized words"""
    new_words = []
    for word in words:
        new_word = word.replace("'", "")
        new_words.append(new_word)
    return new_words


def to_lowercase(words):
    """Convert all characters to lowercase from list of tokenized words"""
    new_words = []
    for word in words:
        new_word = word.lower()
        new_words.append(new_word)
    return new_words

def remove_punctuation(words):
    """Remove punctuation from list of tokenized words"""
    new_words = []
    for word in words:
        new_word = re.sub(r'[^\w\s]', '', word)
        if new_word != '':
            new_words.append(new_word)
    return new_words


def remove_special_chars(words):
    """Remove special characters from a list of tokenized words"""
    symbols = "!\"#$%&()*+-./:;<=>?@[\\]^_`{|}~\n"
    new_words = []
    for word in words:
        for symbol in symbols:
            word = word.replace(symbol, ' ')
            word = word.replace("  ", " ")
        word = word.replace(',', '')
        new_word = word.replace("'", "")
        new_words.append(new_word)
    return new_words



def replace_numbers(words):
    """Replace all interger occurrences in list of tokenized words with textual representation"""
    p = inflect.engine()
    new_words = []
    for word in words:
        if word.isdigit():
            new_word = p.number_to_words(word)
            new_words.append(new_word)
        else:
            new_words.append(word)
    return new_words

def remove_stopwords(words):
    """Remove stop words from list of tokenized words"""
    stop_words = set(stopwords.words('english'))
    new_words = []
    for word in words:
        if word not in stop_words:
            new_words.append(word)
    return new_words

def stemming_words(words):
    """Stem words in list of tokenized words"""
    stemmer = LancasterStemmer()
    stems = []
    for word in words:
        stem = stemmer.stem(word)
        stems.append(stem)
    return stems

def lemmatize_verbs(words):
    """Lemmatize verbs in list of tokenized words"""
    lemmatizer = WordNetLemmatizer()
    lemmas = []
    for word in words:
        lemma = lemmatizer.lemmatize(word, pos='v')
        lemmas.append(lemma)
    return lemmas


def preprocess_text(text):
    """ preprocess the text into list of tokenized words"""
    words_tokens = text_to_words_tokens(text)
    words_tokens = remove_non_ascii(words_tokens)
    words_tokens = to_lowercase(words_tokens)
    words_tokens = remove_punctuation(words_tokens)
    words_tokens = replace_numbers(words_tokens)
    words_tokens = remove_stopwords(words_tokens)
    words_tokens = stemming_words(words_tokens)
    words_tokens = lemmatize_verbs(words_tokens)
    return words_tokens
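

if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the original module):
    # fetch the NLTK data the pipeline depends on, then run it on a sample string.
    # Newer NLTK versions may also require nltk.download('punkt_tab').
    nltk.download('punkt', quiet=True)
    nltk.download('stopwords', quiet=True)
    nltk.download('wordnet', quiet=True)

    sample_text = "The 3 quick brown foxes aren't jumping over 2 lazy dogs!"
    print(preprocess_text(sample_text))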



