Q: Naive Bayes Classifiers

# Naive Bayes Classifiers
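
The snippet below walks through PySpark's `pyspark.ml.classification.NaiveBayes` estimator: it fits a weighted multinomial model, inspects the learned log priors and log conditional probabilities, scores dense and sparse feature vectors, persists both the estimator and the fitted model, and shows how per-class thresholds change the prediction.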

# Assumes an active SparkSession/SparkContext (`spark` and `sc`, e.g. from the pyspark shell).
from pyspark.sql import Row
from pyspark.ml.classification import NaiveBayes, NaiveBayesModel
from pyspark.ml.linalg import Vectors
df = spark.createDataFrame([
  Row(label=0.0, weight=0.1, features=Vectors.dense([0.0, 0.0])),
  Row(label=0.0, weight=0.5, features=Vectors.dense([0.0, 1.0])),
  Row(label=1.0, weight=1.0, features=Vectors.dense([1.0, 0.0]))])
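# Fit a multinomial Naive Bayes model with Laplace smoothing, using the per-row instance weights.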
nb = NaiveBayes(smoothing=1.0, modelType="multinomial", weightCol="weight")
model = nb.fit(df)
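# Learned parameters: log of the class priors (pi) and log of the conditional probabilities (theta).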
model.pi
# DenseVector([-0.81..., -0.58...])
model.theta
# DenseMatrix(2, 2, [-0.91..., -0.51..., -0.40..., -1.09...], 1)
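# Score a new dense feature vector and inspect the prediction, class probabilities, and raw scores.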
test0 = sc.parallelize([Row(features=Vectors.dense([1.0, 0.0]))]).toDF()
result = model.transform(test0).head()
result.prediction
# 1.0
result.probability
# DenseVector([0.32..., 0.67...])
result.rawPrediction
# DenseVector([-1.72..., 0.99...])
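# Sparse feature vectors are accepted as well.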
test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
model.transform(test1).head().prediction
# 1.0
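# Save and reload the estimator; temp_path is assumed to point at an existing writable directory.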
nb_path = temp_path + "/nb"
nb.save(nb_path)
nb2 = NaiveBayes.load(nb_path)
nb2.getSmoothing()
# 1.0
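# Persist the fitted model and reload it; the learned parameters round-trip unchanged.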
model_path = temp_path + "/nb_model"
model.save(model_path)
model2 = NaiveBayesModel.load(model_path)
model.pi == model2.pi
# True
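# Per-class thresholds rescale the class probabilities; here they push the same test point to class 0.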
nb = nb.setThresholds([0.01, 10.00])
model3 = nb.fit(df)
result = model3.transform(test0).head()
result.prediction
# 0.0