kmeans_example.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function

import sys

# $example on$
from pyspark.ml.clustering import KMeans
# $example off$
from pyspark.sql import SparkSession
"""
An example demonstrating k-means clustering.
Run with:
bin/spark-submit examples/src/main/python/ml/kmeans_example.py
This example requires NumPy (http://www.numpy.org/).
"""
if __name__ == "__main__":
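    # Basic argument check: fail fast with a usage message instead of an
    # IndexError when the two required paths are missing.
    if len(sys.argv) != 3:
        print("Usage: kmeans_example.py <input libsvm file> <output file>",
              file=sys.stderr)
        sys.exit(-1)
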
    spark = SparkSession\
        .builder\
        .appName("KMeansExample")\
        .getOrCreate()

    inputFile = sys.argv[1]
    outputFile = sys.argv[2]

    # $example on$
    # Load the data as a DataFrame of libsvm-formatted rows.
    dataset = spark.read.format("libsvm").load(inputFile)

    # Train a k-means model with two clusters and a fixed seed for
    # reproducibility.
    kmeans = KMeans().setK(2).setSeed(1)
    model = kmeans.fit(dataset)

    # Evaluate the clustering by computing the Within Set Sum of Squared
    # Errors (WSSSE); note computeCost is deprecated in Spark 2.4+ in
    # favor of ClusteringEvaluator.
    wssse = model.computeCost(dataset)
    print("Within Set Sum of Squared Errors = " + str(wssse))

    # Show the cluster centers and collect them for the output file.
    centers = model.clusterCenters()
    print("Cluster Centers: ")
    outputContent = "Cluster Centers: \n"
    for center in centers:
        print(center)
        outputContent += str(center) + '\n'

    # Write the centers to the requested output file.
    with open(outputFile, "w") as text_file:
        text_file.write(outputContent)
    # $example off$

    spark.stop()