Q:

katana-assistant

# Fit the model
model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)

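The load step further down reads the model back from katana-assistant-model.pkl, which implies the fitted model was pickled after training. That step is not shown in the question; a minimal sketch, assuming the same file name:

# Sketch (assumption): persist the fitted model for the later pickle load
with open('katana-assistant-model.pkl', 'wb') as f:
    pickle.dump(model, f)
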
classify_local('Fetch blood result for patient')

import nltk
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
# things we need for TensorFlow
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
import pandas as pd
import pickle
import random

# Compile model. Stochastic gradient descent with Nesterov accelerated gradient gives good results for this model
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

def classify_local(sentence):
    ERROR_THRESHOLD = 0.25

    # generate probabilities from the model
    input_data = pd.DataFrame([bow(sentence, words)], dtype=float, index=['input'])
    results = model.predict([input_data])[0]
    # filter out predictions below a threshold, and provide intent index
    results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]
    # sort by strength of probability
    results.sort(key=lambda x: x[1], reverse=True)
    # return list of (intent, probability) tuples
    return_list = []
    for r in results:
        return_list.append((classes[r[0]], str(r[1])))
    return return_list

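classify_local (and the p = bow(...) call further down) relies on a bow helper that the question never shows. A minimal sketch of what it would have to do, given how it is called - tokenize, stem with the same LancasterStemmer, and mark each vocabulary hit with a 1:

def clean_up_sentence(sentence):
    # tokenize the pattern - split the sentence into words
    sentence_words = nltk.word_tokenize(sentence)
    # stem each word with the same stemmer used at training time
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words

def bow(sentence, words, show_details=False):
    # bag of words: 0/1 for each word in the vocabulary
    sentence_words = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
                if show_details:
                    print("found in bag: %s" % w)
    return np.array(bag)
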
# Create model - 3 layers. First layer has 128 neurons, second layer has 64 neurons, and the
# output layer has one neuron per intent, with softmax to predict the output intent
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))

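A quick way to sanity-check the stack (not part of the original snippet):

model.summary()  # prints the Dense/Dropout layers and parameter counts
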
# Use pickle to load in the pre-trained model
import tensorflow as tf

global graph
graph = tf.get_default_graph()
with open('katana-assistant-model.pkl', 'rb') as f:
    model = pickle.load(f)

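Grabbing the default graph like this is the usual TF 1.x pattern for serving a Keras model from another thread (for example a Flask endpoint) - an assumption here, since the surrounding app code is not shown. Predictions would then run inside that graph:

# Sketch (assumption): input_data is a bag-of-words frame built as in classify_local
with graph.as_default():
    results = model.predict([input_data])[0]
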
# create our training data
training = []
# create an empty array for our output
output_empty = [0] * len(classes)
# training set, bag of words for each sentence
for doc in documents:
    # initialize our bag of words
    bag = []
    # list of tokenized words for the pattern
    pattern_words = doc[0]
    # stem each word - create base word, in attempt to represent related words
    pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
    # create our bag of words array with 1 if a word match is found in the current pattern
    for w in words:
        bag.append(1 if w in pattern_words else 0)
    # output is a '0' for each tag and '1' for the current tag (for each pattern)
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1

    training.append([bag, output_row])

# shuffle our features and turn into np.array
# (dtype=object because each row pairs two lists of different lengths)
random.shuffle(training)
training = np.array(training, dtype=object)

# create train and test lists. X - patterns, Y - intents
train_x = list(training[:, 0])
train_y = list(training[:, 1])

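For intuition, a tiny worked example of the encoding above, using a hypothetical three-word vocabulary and two intents (names invented for illustration):

# Hypothetical toy vocabulary and intents, just to illustrate the encoding
words_demo = ['blood', 'press', 'show']        # stemmed vocabulary
classes_demo = ['blood_pressure', 'greeting']  # intents
pattern_demo = ['show', 'blood', 'press']      # stemmed tokens of one pattern

bag_demo = [1 if w in pattern_demo else 0 for w in words_demo]  # -> [1, 1, 1]
output_demo = [0] * len(classes_demo)
output_demo[classes_demo.index('blood_pressure')] = 1           # -> [1, 0]
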
p = bow("Load blood pressure for patient", words)
print(p)
print(classes)

45 documents
9 classes ['adverse_drug', 'blood_pressure', 'blood_pressure_search', 'goodbye', 'greeting', 'hospital_search', 'options', 'pharmacy_search', 'thanks']
82 unique stemmed words ["'s", ',', 'a', 'advers', 'al', 'anyon', 'ar', 'awesom', 'be', 'behavy', 'blood', 'by', 'bye', 'can', 'caus', 'chat', 'check', 'could', 'dat', 'day', 'detail', 'do', 'dont', 'drug', 'entry', 'find', 'for', 'giv', 'good', 'goodby', 'hav', 'hello', 'help', 'hi', 'hist', 'hospit', 'how', 'i', 'id', 'is', 'lat', 'list', 'load', 'loc', 'log', 'look', 'lookup', 'man', 'me', 'mod', 'nearby', 'next', 'nic', 'of', 'off', 'op', 'paty', 'pharm', 'press', 'provid', 'react', 'rel', 'result', 'search', 'see', 'show', 'suit', 'support', 'task', 'thank', 'that', 'ther', 'til', 'tim', 'to', 'transf', 'up', 'want', 'what', 'which', 'with', 'you']
