How to extract subjects in a sentence and their respective dependent phrases?

Solution 1

I was going through the spacy library more, and I finally figured out the solution through dependency parsing. Thanks to this repo, I figured out how to include adjectives as well in my subject-verb-object triples (making them SVAOs), as well as how to extract compound subjects from the query. Here goes my solution:

from spacy.lang.en import English

SUBJECTS = ["nsubj", "nsubjpass", "csubj", "csubjpass", "agent", "expl"]
OBJECTS = ["dobj", "dative", "attr", "oprd"]
ADJECTIVES = ["acomp", "advcl", "advmod", "amod", "appos", "nn", "nmod", "ccomp", "complm",
              "hmod", "infmod", "xcomp", "rcmod", "poss", "possessive"]
COMPOUNDS = ["compound"]
PREPOSITIONS = ["prep"]

def getSubsFromConjunctions(subs):
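    # find additional subjects joined to these by "and" (e.g. "Trump and Hillary"), recursively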
    moreSubs = []
    for sub in subs:
        # rights is a generator
        rights = list(sub.rights)
        rightDeps = {tok.lower_ for tok in rights}
        if "and" in rightDeps:
            moreSubs.extend([tok for tok in rights if tok.dep_ in SUBJECTS or tok.pos_ == "NOUN"])
            if len(moreSubs) > 0:
                moreSubs.extend(getSubsFromConjunctions(moreSubs))
    return moreSubs

def getObjsFromConjunctions(objs):
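    # find additional objects joined to these by "and", recursively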
    moreObjs = []
    for obj in objs:
        # rights is a generator
        rights = list(obj.rights)
        rightDeps = {tok.lower_ for tok in rights}
        if "and" in rightDeps:
            moreObjs.extend([tok for tok in rights if tok.dep_ in OBJECTS or tok.pos_ == "NOUN"])
            if len(moreObjs) > 0:
                moreObjs.extend(getObjsFromConjunctions(moreObjs))
    return moreObjs

def getVerbsFromConjunctions(verbs):
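    # find additional verbs joined to these by "and", recursively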
    moreVerbs = []
    for verb in verbs:
        rightDeps = {tok.lower_ for tok in verb.rights}
        if "and" in rightDeps:
            moreVerbs.extend([tok for tok in verb.rights if tok.pos_ == "VERB"])
            if len(moreVerbs) > 0:
                moreVerbs.extend(getVerbsFromConjunctions(moreVerbs))
    return moreVerbs

def findSubs(tok):
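    # climb the dependency tree to the nearest VERB or NOUN head and return its subjects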
    head = tok.head
    while head.pos_ != "VERB" and head.pos_ != "NOUN" and head.head != head:
        head = head.head
    if head.pos_ == "VERB":
        subs = [tok for tok in head.lefts if tok.dep_ == "SUB"]
        if len(subs) > 0:
            verbNegated = isNegated(head)
            subs.extend(getSubsFromConjunctions(subs))
            return subs, verbNegated
        elif head.head != head:
            return findSubs(head)
    elif head.pos_ == "NOUN":
        return [head], isNegated(tok)
    return [], False

def isNegated(tok):
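    # a token counts as negated if a negation word is among its immediate children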
    negations = {"no", "not", "n't", "never", "none"}
    for dep in list(tok.lefts) + list(tok.rights):
        if dep.lower_ in negations:
            return True
    return False

def findSVs(tokens):
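    # extract (subject, verb) pairs; a "!" prefix marks a negated verb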
    svs = []
    verbs = [tok for tok in tokens if tok.pos_ == "VERB"]
    for v in verbs:
        subs, verbNegated = getAllSubs(v)
        if len(subs) > 0:
            for sub in subs:
                svs.append((sub.orth_, "!" + v.orth_ if verbNegated else v.orth_))
    return svs

def getObjsFromPrepositions(deps):
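    # collect objects sitting under a preposition attached to the verb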
    objs = []
    for dep in deps:
        if dep.pos_ == "ADP" and dep.dep_ == "prep":
            objs.extend([tok for tok in dep.rights if tok.dep_ in OBJECTS or (tok.pos_ == "PRON" and tok.lower_ == "me")])
    return objs

def getAdjectives(toks):
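    # return each token together with its adjectival modifiers on both sides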
    toks_with_adjectives = []
    for tok in toks:
        adjs = [left for left in tok.lefts if left.dep_ in ADJECTIVES]
        adjs.append(tok)
        adjs.extend([right for right in tok.rights if right.dep_ in ADJECTIVES])
        toks_with_adjectives.extend(adjs)

    return toks_with_adjectives

def getObjsFromAttrs(deps):
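    # follow an attributive noun to a verb on its right and take that verb's objects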
    for dep in deps:
        if dep.pos_ == "NOUN" and dep.dep_ == "attr":
            verbs = [tok for tok in dep.rights if tok.pos_ == "VERB"]
            if len(verbs) > 0:
                for v in verbs:
                    rights = list(v.rights)
                    objs = [tok for tok in rights if tok.dep_ in OBJECTS]
                    objs.extend(getObjsFromPrepositions(rights))
                    if len(objs) > 0:
                        return v, objs
    return None, None

def getObjFromXComp(deps):
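    # follow an open clausal complement (xcomp) verb and return its objects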
    for dep in deps:
        if dep.pos_ == "VERB" and dep.dep_ == "xcomp":
            v = dep
            rights = list(v.rights)
            objs = [tok for tok in rights if tok.dep_ in OBJECTS]
            objs.extend(getObjsFromPrepositions(rights))
            if len(objs) > 0:
                return v, objs
    return None, None

def getAllSubs(v):
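    # take subjects from the verb's left children, falling back to climbing the tree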
    verbNegated = isNegated(v)
    subs = [tok for tok in v.lefts if tok.dep_ in SUBJECTS and tok.pos_ != "DET"]
    if len(subs) > 0:
        subs.extend(getSubsFromConjunctions(subs))
    else:
        foundSubs, verbNegated = findSubs(v)
        subs.extend(foundSubs)
    return subs, verbNegated

def getAllObjs(v):
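    # gather the verb's objects, including prepositional, xcomp and conjoined objects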
    # rights is a generator
    rights = list(v.rights)
    objs = [tok for tok in rights if tok.dep_ in OBJECTS]
    objs.extend(getObjsFromPrepositions(rights))

    potentialNewVerb, potentialNewObjs = getObjFromXComp(rights)
    if potentialNewVerb is not None and potentialNewObjs is not None and len(potentialNewObjs) > 0:
        objs.extend(potentialNewObjs)
        v = potentialNewVerb
    if len(objs) > 0:
        objs.extend(getObjsFromConjunctions(objs))
    return v, objs

def getAllObjsWithAdjectives(v):
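    # like getAllObjs, but falls back to adjectival complements when no object is found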
    # rights is a generator
    rights = list(v.rights)
    objs = [tok for tok in rights if tok.dep_ in OBJECTS]

    if len(objs) == 0:
        objs = [tok for tok in rights if tok.dep_ in ADJECTIVES]

    objs.extend(getObjsFromPrepositions(rights))

    potentialNewVerb, potentialNewObjs = getObjFromXComp(rights)
    if potentialNewVerb is not None and potentialNewObjs is not None and len(potentialNewObjs) > 0:
        objs.extend(potentialNewObjs)
        v = potentialNewVerb
    if len(objs) > 0:
        objs.extend(getObjsFromConjunctions(objs))
    return v, objs

def findSVOs(tokens):
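    # extract (subject, verb, object) triples; "!" marks a negated verb or object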
    svos = []
    verbs = [tok for tok in tokens if tok.pos_ == "VERB" and tok.dep_ != "aux"]
    for v in verbs:
        subs, verbNegated = getAllSubs(v)
        # hopefully there are subs, if not, don't examine this verb any longer
        if len(subs) > 0:
            v, objs = getAllObjs(v)
            for sub in subs:
                for obj in objs:
                    objNegated = isNegated(obj)
                    svos.append((sub.lower_, "!" + v.lower_ if verbNegated or objNegated else v.lower_, obj.lower_))
    return svos

def findSVAOs(tokens):
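    # extract (subject, verb, object) triples with compounds and adjectives expanded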
    svos = []
    verbs = [tok for tok in tokens if tok.pos_ == "VERB" and tok.dep_ != "aux"]
    for v in verbs:
        subs, verbNegated = getAllSubs(v)
        # hopefully there are subs, if not, don't examine this verb any longer
        if len(subs) > 0:
            v, objs = getAllObjsWithAdjectives(v)
            for sub in subs:
                for obj in objs:
                    objNegated = isNegated(obj)
                    obj_desc_tokens = generate_left_right_adjectives(obj)
                    sub_compound = generate_sub_compound(sub)
                    svos.append((" ".join(tok.lower_ for tok in sub_compound), "!" + v.lower_ if verbNegated or objNegated else v.lower_, " ".join(tok.lower_ for tok in obj_desc_tokens)))
    return svos

def generate_sub_compound(sub):
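    # expand a subject into its full compound (e.g. "Donald" + "Trump")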
    sub_compounds = []
    for tok in sub.lefts:
        if tok.dep_ in COMPOUNDS:
            sub_compounds.extend(generate_sub_compound(tok))
    sub_compounds.append(sub)
    for tok in sub.rights:
        if tok.dep_ in COMPOUNDS:
            sub_compounds.extend(generate_sub_compound(tok))
    return sub_compounds

def generate_left_right_adjectives(obj):
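    # expand an object with its adjectival modifiers (e.g. "worst" + "president")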
    obj_desc_tokens = []
    for tok in obj.lefts:
        if tok.dep_ in ADJECTIVES:
            obj_desc_tokens.extend(generate_left_right_adjectives(tok))
    obj_desc_tokens.append(obj)

    for tok in obj.rights:
        if tok.dep_ in ADJECTIVES:
            obj_desc_tokens.extend(generate_left_right_adjectives(tok))

    return obj_desc_tokens

Now when you pass a query such as:

from spacy.lang.en import English
parser = English()

sentence = u"""
Donald Trump is the worst president of USA, but Hillary is better than him
"""

parse = parser(sentence)
print(findSVAOs(parse))

You will get the following:

[(u'donald trump', u'is', u'worst president'), (u'hillary', u'is', u'better')]
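
Note: as several commenters point out below, a bare English() pipeline in newer spacy versions (2.x/3.x) has no tagger or parser, so the snippet above returns an empty list, and copular "is" is tagged AUX rather than VERB. Here is a minimal sketch of the same call under those assumptions (en_core_web_sm is just one choice of pretrained model, and the AUX tweak mirrors the fixes suggested in the comments):

import spacy

# load a full pretrained pipeline instead of a bare English() tokenizer
# (assumes the model was installed via: python -m spacy download en_core_web_sm)
nlp = spacy.load("en_core_web_sm", disable=["ner"])

parse = nlp(u"Donald Trump is the worst president of USA, but Hillary is better than him")

# on newer models, also relax the verb filter in findSVOs/findSVAOs, e.g.:
#     verbs = [tok for tok in tokens if tok.pos_ in ("VERB", "AUX") and tok.dep_ != "aux"]
print(findSVAOs(parse))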

Thank you @Krzysiek for your solution too. I was actually unable to go deep enough into your library to modify it, so I instead tried modifying the code from the repo linked above to solve my problem.

Solution 2

I was recently solving a very similar problem - I needed to extract subject(s), action, and object(s). I open-sourced my work, so you can check out this library: https://github.com/krzysiekfonal/textpipeliner

It is based on spacy (as opposed to nltk), and it also builds on the sentence's dependency tree.

So, for instance, let's take this doc embedded in spacy as an example:

import spacy
nlp = spacy.load("en")
doc = nlp(u"The Empire of Japan aimed to dominate Asia and the " \
               "Pacific and was already at war with the Republic of China " \
               "in 1937, but the world war is generally said to have begun on " \
               "1 September 1939 with the invasion of Poland by Germany and " \
               "subsequent declarations of war on Germany by France and the United Kingdom. " \
               "From late 1939 to early 1941, in a series of campaigns and treaties, Germany conquered " \
               "or controlled much of continental Europe, and formed the Axis alliance with Italy and Japan. " \
               "Under the Molotov-Ribbentrop Pact of August 1939, Germany and the Soviet Union partitioned and " \
               "annexed territories of their European neighbours, Poland, Finland, Romania and the Baltic states. " \
               "The war continued primarily between the European Axis powers and the coalition of the United Kingdom " \
               "and the British Commonwealth, with campaigns including the North Africa and East Africa campaigns, " \
               "the aerial Battle of Britain, the Blitz bombing campaign, the Balkan Campaign as well as the " \
               "long-running Battle of the Atlantic. In June 1941, the European Axis powers launched an invasion " \
               "of the Soviet Union, opening the largest land theatre of war in history, which trapped the major part " \
               "of the Axis' military forces into a war of attrition. In December 1941, Japan attacked " \
               "the United States and European territories in the Pacific Ocean, and quickly conquered much of " \
               "the Western Pacific.")

You can now create a simple pipes structure (more about pipes in the readme of this project):

pipes_structure = [SequencePipe([FindTokensPipe("VERB/nsubj/*"),
                                 NamedEntityFilterPipe(),
                                 NamedEntityExtractorPipe()]),
                   FindTokensPipe("VERB"),
                   AnyPipe([SequencePipe([FindTokensPipe("VBD/dobj/NNP"),
                                          AggregatePipe([NamedEntityFilterPipe("GPE"), 
                                                NamedEntityFilterPipe("PERSON")]),
                                          NamedEntityExtractorPipe()]),
                            SequencePipe([FindTokensPipe("VBD/**/*/pobj/NNP"),
                                          AggregatePipe([NamedEntityFilterPipe("LOC"), 
                                                NamedEntityFilterPipe("PERSON")]),
                                          NamedEntityExtractorPipe()])])]

engine = PipelineEngine(pipes_structure, Context(doc), [0,1,2])
engine.process()

And as the result you will get:

>>>[([Germany], [conquered], [Europe]),
 ([Japan], [attacked], [the, United, States])]

It is actually based strongly (the token-finding pipes) on another library - grammaregex. You can read about it in this post: https://medium.com/@krzysiek89dev/grammaregex-library-regex-like-for-text-mining-49e5706c9c6d#.zgx7odhsc

EDITED

Actually, the example I presented in the readme discards adjectives, but all you need to do is adjust the pipe structure passed to the engine according to your needs. For instance, for your sample sentence I can propose a structure/solution which gives you a tuple of 3 elements (subj, verb, adj) per sentence:

import spacy
from textpipeliner import PipelineEngine, Context
from textpipeliner.pipes import *

pipes_structure = [SequencePipe([FindTokensPipe("VERB/nsubj/NNP"),
                                 NamedEntityFilterPipe(),
                                 NamedEntityExtractorPipe()]),
                       AggregatePipe([FindTokensPipe("VERB"),
                                      FindTokensPipe("VERB/xcomp/VERB/aux/*"),
                                      FindTokensPipe("VERB/xcomp/VERB")]),
                       AnyPipe([FindTokensPipe("VERB/[acomp,amod]/ADJ"),
                                AggregatePipe([FindTokensPipe("VERB/[dobj,attr]/NOUN/det/DET"),
                                               FindTokensPipe("VERB/[dobj,attr]/NOUN/[acomp,amod]/ADJ")])])
                      ]

engine = PipelineEngine(pipes_structure, Context(doc), [0,1,2])
engine.process()

It will give you the result:

[([Donald, Trump], [is], [the, worst])]

A little complexity comes from the fact that you have a compound sentence, and the lib produces one tuple per sentence - I'll soon add the possibility (I need it for my project too) of passing a list of pipe structures to the engine, to allow producing more tuples per sentence. For now you can solve it by creating a second engine for compound sentences, whose structure differs only in having VERB/conj/VERB instead of VERB (those regexes always start from ROOT, so VERB/conj/VERB leads you to the second verb in a compound sentence):

pipes_structure_comp = [SequencePipe([FindTokensPipe("VERB/conj/VERB/nsubj/NNP"),
                                 NamedEntityFilterPipe(),
                                 NamedEntityExtractorPipe()]),
                   AggregatePipe([FindTokensPipe("VERB/conj/VERB"),
                                  FindTokensPipe("VERB/conj/VERB/xcomp/VERB/aux/*"),
                                  FindTokensPipe("VERB/conj/VERB/xcomp/VERB")]),
                   AnyPipe([FindTokensPipe("VERB/conj/VERB/[acomp,amod]/ADJ"),
                            AggregatePipe([FindTokensPipe("VERB/conj/VERB/[dobj,attr]/NOUN/det/DET"),
                                           FindTokensPipe("VERB/conj/VERB/[dobj,attr]/NOUN/[acomp,amod]/ADJ")])])
                  ]

engine2 = PipelineEngine(pipes_structure_comp, Context(doc), [0,1,2])

And now, after you run both engines, you will get the expected result :)

engine.process()
engine2.process()
[([Donald, Trump], [is], [the, worst])]
[([Hillary], [is], [better])]

This is what you need, I think. Of course I just quickly created a pipe structure for the given example sentence, and it won't work for every case, but I have seen a lot of sentence structures and it will already cover quite a nice percentage of them. For the cases which don't work yet you can just add more FindTokensPipes etc., and I'm sure that after a few adjustments you will cover a really good number of possible sentences (English is not too complex, so... :)

Comments

  • psr
    psr almost 2 years

    I am trying to work on subject extraction in a sentence, so that I can get the sentiment with respect to each subject. I am using nltk in Python 2.7 for this purpose. Take the following sentence as an example:

    Donald Trump is the worst president of USA, but Hillary is better than him

    Here we can see that Donald Trump and Hillary are the two subjects, and the sentiment related to Donald Trump is negative while that related to Hillary is positive. So far, I have been able to break this sentence into chunks of noun phrases, and I get the following:

    (S
      (NP Donald/NNP Trump/NNP)
      is/VBZ
      (NP the/DT worst/JJS president/NN)
      in/IN
      (NP USA,/NNP)
      but/CC
      (NP Hillary/NNP)
      is/VBZ
      better/JJR
      than/IN
      (NP him/PRP))
    

    Now, how do I go about finding the subjects from these noun phrases? And how do I group together the phrases meant for each subject? Once I have the phrases for each subject separately, I can perform sentiment analysis on each of them.

    EDIT

    I looked into the library mentioned by @Krzysiek (spacy), and it gave me the dependency trees for the sentences as well.

    Here is the code:

    from spacy.en import English
    parser = English()
    
    example = u"Donald Trump is the worst president of USA, but Hillary is better than him"
    parsedEx = parser(example)
    # shown as: original token, dependency tag, head word, left dependents, right dependents
    for token in parsedEx:
        print(token.orth_, token.dep_, token.head.orth_, [t.orth_ for t in token.lefts], [t.orth_ for t in token.rights])
    

    Here are the dependency trees:

    (u'Donald', u'compound', u'Trump', [], [])
    (u'Trump', u'nsubj', u'is', [u'Donald'], [])
    (u'is', u'ROOT', u'is', [u'Trump'], [u'president', u',', u'but', u'is'])
    (u'the', u'det', u'president', [], [])
    (u'worst', u'amod', u'president', [], [])
    (u'president', u'attr', u'is', [u'the', u'worst'], [u'of'])
    (u'of', u'prep', u'president', [], [u'USA'])
    (u'USA', u'pobj', u'of', [], [])
    (u',', u'punct', u'is', [], [])
    (u'but', u'cc', u'is', [], [])
    (u'Hillary', u'nsubj', u'is', [], [])
    (u'is', u'conj', u'is', [u'Hillary'], [u'better'])
    (u'better', u'acomp', u'is', [], [u'than'])
    (u'than', u'prep', u'better', [], [u'him'])
    (u'him', u'pobj', u'than', [], [])
    

    This gives in-depth insight into the dependencies between the different tokens of the sentence. Here is the link to the paper which describes the dependencies between different pairs. How can I use this tree to attach the contextual words for the different subjects to them?

  • psr
    psr over 7 years
    I tried your solution, but it discards the adjectives/adverbs used for different subjects. Since I have to perform sentiment analysis, this will render all the statements/relations as neutral.
  • Krzysiek
    Krzysiek over 7 years
    OK, so the answer to your comment has been added after the EDITED section in the original answer, as there is not enough space in a comment.
  • Krzysiek
    Krzysiek over 7 years
    Nice, spacy is really powerful ;) By the way, I just implemented in my lib the possibility to pass a list of pipe structures, to get more than one tuple per single statement ;) I'm wondering whether your solution works for very different examples of statements?
  • psr
    psr over 7 years
    Yes, indeed. It tries to parse many other dependencies besides just subject-verb-object. I will try to implement your library tonight and explore it.
  • Jaffer Wilson
    Jaffer Wilson over 6 years
    @Krzysiek getting error: AttributeError: 'spacy.tokens.doc.Doc' object has no attribute 'next_sent' after typing engine2.process()
  • Krzysiek
    Krzysiek over 6 years
    @JafferWilson thanks. It should be Context, which is a kind of wrapper for the spacy doc. I corrected it.
  • Jaffer Wilson
    Jaffer Wilson over 6 years
    @Krzysiek When I tried your code, I get output as [] with your example.
  • Krzysiek
    Krzysiek over 6 years
    @JafferWilson I just tried it and it works fine. A little bit confusing is that the section before "EDITED" works for my example ("The Empire of Japan...") but the code after "EDITED" was tweaked to work for the question author's needs and his statement: "Donald Trump is the worst president of USA, but Hillary is better than him".
  • Ashok Kumar Jayaraman
    Ashok Kumar Jayaraman about 5 years
    I am getting an empty list as a result. Is it working for you?
  • chattrat423
    chattrat423 about 5 years
    The above import English appears to no longer contain a parser. Anyway, the fix is this: import spacy; parser = spacy.load('en', disable=['ner','textcat'])
  • Daniel Gannota
    Daniel Gannota over 4 years
    I have been trying to run the code both with the described method and also using the specified model en_core_web_lg instead of parser = English(). In both cases the code returns nothing for the same example sentence. What could I be doing wrong? I am running the latest version of spacy and its models.
  • Anshuman Srivastava
    Anshuman Srivastava about 4 years
    I am getting the same issue. It is returning an empty list [].
  • Matias
    Matias about 4 years
    @Krzysiek I tried all the examples and I am getting an empty list; I also tried the examples below and the situation is the same (some people are commenting that they are having this issue too). Any idea what might be the reason? Thanks
  • Azrion
    Azrion almost 4 years
    Change the following in findSVAOs: verbs = [tok for tok in tokens if tok.pos_ == "AUX"] (newer spacy models tag "is" as AUX rather than VERB).
  • RandomTask
    RandomTask almost 4 years
    I was also getting an empty list []. I added all the changes from the comments above. "en" didn't load, so I used: parser = spacy.load('en_core_web_md', disable=['ner','textcat']). Also, I had to remove "parser = English()"; then it worked.
  • kely789456123
    kely789456123 almost 4 years
    @Krzysiek Can we use your solution for French data?
  • kely789456123
    kely789456123 almost 4 years
    Can we use this code to parse French data as well? I want to retrieve negative sentences in a dataset.
  • Michael Lilley
    Michael Lilley over 2 years
    Initially I was only able to get one SVAO extracted per sentence, rather than ([(u'donald trump', u'is', u'worst president'), (u'hillary', u'is', u'better')]). To fix this, I changed verbs = [tok for tok in tokens if tok.pos_ == "VERB" and tok.dep_ != "aux"] to verbs = [tok for tok in tokens if tok.pos_ == "VERB" or tok.dep_ != "aux"].