
Extracting N-grams From Tweets In Python

Say that I have 100 tweets, and from those tweets I need to extract: 1) food names, and 2) beverage names. Example tweet: 'Yesterday I had a coca cola, and a hot dog for lunch, and some bana split for desert. I liked the coke, but the banana in the banana split dessert was ripe.'

Solution 1:

Not sure what you have tried so far, but below is a solution using ngrams from nltk and a plain dict().
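For reference, ngrams from nltk simply slides a window of size n over a token list and yields tuples; with n=2 you get the bigrams used below:

from nltk import ngrams

print(list(ngrams("a coca cola and a hot dog".split(), 2)))
# [('a', 'coca'), ('coca', 'cola'), ('cola', 'and'), ('and', 'a'), ('a', 'hot'), ('hot', 'dog')]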

from nltk import ngrams

tweet = "Yesterday I had a coca cola, and a hot dog for lunch, and some bana split for desert. I liked the coke, but the banana in the banana split dessert was ripe"# Your lexicons
lexicon_food = ["hot dog", "banana", "banana split"]
lexicon_beverage = ["coke", "cola", "coca cola"]
lexicon_dict = {x: [x, 'Food'] for x in lexicon_food}
lexicon_dict.update({x: [x, 'Beverage'] for x in lexicon_beverage})
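# lexicon_dict now maps each phrase to [phrase, category], e.g.
# {'hot dog': ['hot dog', 'Food'], ..., 'coca cola': ['coca cola', 'Beverage']}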

# Function to extract lexicon items from a bigram:
# try the full bigram first, then its first word alone
def extract(g, lex):
    if ' '.join(g) in lex.keys():
        return lex.get(' '.join(g))
    elif g[0] in lex.keys():
        return lex.get(g[0])

# Your task
out = [[extract(g, lexicon_dict) for g in ngrams(sentence.split(), 2) if extract(g, lexicon_dict)] 
        for sentence in tweet.replace(',', '').lower().split('.')]
print(out)

Output:

[[['coca cola', 'Beverage'], ['cola', 'Beverage'], ['hot dog', 'Food']], 
 [['coke', 'Beverage'], ['banana', 'Food'], ['banana split', 'Food']]]

Approach 2 (avoid matching both "coca cola" and "cola")

Approach 1 reports both "coca cola" and "cola" for the same phrase, because each bigram is checked both as a whole and by its first word. The variant below scans the sentence word by word and, when a two-word phrase matches, skips past both words, so the embedded "cola" is never matched on its own:

def extract2(sentence, lex):
    extracted_words = []
    words = sentence.split()
    i = 0
    while i < len(words):
        # Prefer the two-word phrase starting at position i
        if ' '.join(words[i:i+2]) in lex.keys():
            extracted_words.append(lex.get(' '.join(words[i:i+2])))
            i += 2  # consume both words
        # Otherwise fall back to the single word
        elif words[i] in lex.keys():
            extracted_words.append(lex.get(words[i]))
            i += 1
        else:
            i += 1
    return extracted_words

out = [extract2(s, lexicon_dict) for s in tweet.replace(',', '').lower().split('.')]
print(out)

Output:

[[['coca cola', 'Beverage'], ['hot dog', 'Food']], 
 [['coke', 'Beverage'], ['banana', 'Food'], ['banana split', 'Food']]]

Note that nltk is not needed here.
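If your lexicon phrases ever grow beyond two words, the same greedy idea generalizes: at each position, try the longest window first. A minimal sketch of that extension (extract_n and max_n are illustrative names, not part of the solutions above):

def extract_n(sentence, lex, max_n=3):
    extracted = []
    words = sentence.split()
    i = 0
    while i < len(words):
        # Try the longest phrase starting at position i, then shorter ones
        for n in range(max_n, 0, -1):
            phrase = ' '.join(words[i:i+n])
            if phrase in lex:
                extracted.append(lex[phrase])
                i += n  # consume all matched words
                break
        else:
            i += 1  # nothing matched at this position
    return extracted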

Solution 2:

Here is a simple solution:

import re

def lexicon_by_word(lexicons):
    return {word: key for key in lexicons.keys() for word in lexicons[key]}
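# e.g. lexicon_by_word(lexicons) ->
# {'hot dog': 'food', 'banana': 'food', 'banana split': 'food',
#  'coke': 'beverage', 'cola': 'beverage', 'coca cola': 'beverage'}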



def split_sentences(st):
    sentences = re.split(r'[.?!]\s*', st)
    if sentences[-1]:
        return sentences
    else:
        return sentences[:-1]
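# e.g. split_sentences("I ate. It was good!") -> ['I ate', 'It was good']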

def ngrams_finder(lexicons, text):
    lexicons_by_word = lexicon_by_word(lexicons)
    # Put longer alternatives first, so "coca cola" wins over "cola"
    # and "banana split" wins over "banana"
    words = sorted(lexicons_by_word.keys(), key=len, reverse=True)
    pattern = re.compile("|".join(map(re.escape, words)))
    ngrams = []
    for sentence in split_sentences(text):
        # findall returns an empty list when a sentence has no matches
        ngram = [[result, lexicons_by_word[result]] for result in pattern.findall(sentence)]
        ngrams.append(ngram)
    return ngrams

# You could customize it
text = "Yesterday I had a coca cola, and a hot dog for lunch, and some bana split for desert. I liked the coke, but the banana in the banana split dessert was ripe"

lexicons = {
    "food": ["hot dog", "banana", "banana split"],
    "beverage": ["coke", "cola", "coca cola"],
}
print(ngrams_finder(lexicons, text))
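
Output (with the longest-alternatives-first pattern above; the labels are lowercase because they come from the lexicons keys):

[[['coca cola', 'beverage'], ['hot dog', 'food']],
 [['coke', 'beverage'], ['banana', 'food'], ['banana split', 'food']]]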

split_sentences function taken from here: Splitting a sentence by ending characters
