AI ChatBot Experiment Learning resources


#1

Articles:

Videos:

Example Projects:

Libraries:


#2

Some spaCy-related code examples:

# spaCy learning examples: lemmatization, POS tagging, NER, dependency
# parsing, noun chunks, word vectors, and document similarity.
# Uncomment one example at a time and run it.
import spacy

# NOTE(review): spacy.lemmatizer / LEMMA_INDEX / LEMMA_EXC / LEMMA_RULES were
# removed in spaCy v3 — this example only works on spaCy v2.x.
# from spacy import displacy
# from spacy.lemmatizer import Lemmatizer
# from spacy.lang.en import LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES
#
# lemmatizer = Lemmatizer(LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES)
# print(lemmatizer('fastest', 'adj'))  # 2nd param is token's part-of-speech tag

# spaCy v3 removed the 'en' shortcut link; load the full pipeline package
# instead. Install it first with:  python -m spacy download en_core_web_sm
nlp = spacy.load('en_core_web_sm')  # loads the English pipeline into a Python object

# Tokenization + part-of-speech tagging
# doc = nlp(u'Google release "Move Mirror" AI experiment that matches your pose from 80,000 images') #Creates a doc object
# for token in doc:
#     print(token.text, token.pos_)  # prints the text and POS

# Stemming comparison with NLTK (stemming chops suffixes; lemmatization maps to dictionary form)
# from nltk.stem.porter import *
# from nltk.stem.snowball import SnowballStemmer
# porter_stemmer = PorterStemmer()
# snowball_stemmer = SnowballStemmer("english")
# print(porter_stemmer.stem("fastest"))
# print(snowball_stemmer.stem("fastest"))

# Named-entity recognition
# my_string = u"Imagine dragons come and take over the city."
# doc = nlp(my_string)
# for ent in doc.ents:
#     print(ent.text, ent.label_)

# Stop-word check
# print(nlp.vocab[u'is'].is_stop)

# Dependency parsing: walk a token's syntactic ancestors
# doc = nlp(u'Book me a flight from Bangalore to Goa')
# blr, goa = doc[5], doc[7]
# print(list(blr.ancestors))
# print(list(goa.ancestors))

# Relate each booking target to its task via the dependency tree
# doc = nlp(u'Book a table at the restaurant and the taxi to the hotel')
# tasks = doc[2], doc[8] #(table, taxi)
# tasks_target = doc[5], doc[11] #(restaurant, hotel)
# for task in tasks_target:
#     for tok in task.ancestors:
#         if tok in tasks:
#             print("Booking of {} belongs to {}".format(tok, task))
#             break

# Visualize the dependency parse in the browser
# doc = nlp(u'Book a table at the restaurant and the taxi to the hotel')
# displacy.serve(doc, style="dep")

# Pair each place with the action that governs it
# doc = nlp(u"What are some places to visit in Berlin and stay in Lubeck")
# places = [doc[7], doc[11]] #[Berlin, Lubeck]
# actions = [doc[5], doc[9]] #[visit, stay]
# for place in places:
#     for tok in place.ancestors:
#         if tok in actions:
#             print("User is referring {} to {}".format(place, tok))
#             break

# Noun chunks
# doc = nlp(u"Boston Dynamics is gearing up to produce thousands of robot dogs")
# print(list(doc.noun_chunks))

# Noun chunks with their syntactic role and head word
# doc = nlp(u"Deep learning cracks the code of messenger RNAs and protein-coding potential")
# for chunk in doc.noun_chunks:
#     print(chunk.text, chunk.root.text, chunk.root.dep_,
#           chunk.root.head.text)

# Word vectors (first 5 dimensions per token)
# doc = nlp(u'How are you doing today?')
# for token in doc:
#     print(token.text, token.vector[:5])

# Vector similarity between short documents
# hello_doc = nlp(u"hello")
# hi_doc = nlp(u"hi")
# hella_doc = nlp(u"hella")
# print(hello_doc.similarity(hi_doc))
# print(hello_doc.similarity(hella_doc))

# Sentence-level similarity
# GoT_str1 = nlp(u"When will next season of Game of Thrones be releasing?")
# GoT_str2 = nlp(u"Game of Thrones next season release date?")
# print(GoT_str1.similarity(GoT_str2))

# Pairwise token similarity as a percentage
# example_doc = nlp(u"car truck google")
# for t1 in example_doc:
#     for t2 in example_doc:
#         similarity_perc = int(t1.similarity(t2) * 100)
#         print("Word {} is {}% similar to word {}".format(t1.text, similarity_perc,  t2.text))


# Plain tokenization
# doc = nlp(u'Brexit is the impending withdrawal of the U.K. from the European Union.')
# for token in doc:
#     print(token.text)

Run this in a Jupyter notebook, uncommenting one example at a time.

To use Spacy, it must be installed in the working environment.

pip3 install -U spacy

or visit https://spacy.io/usage/ for installation detail.