
Clustering in Python

Published: 2022-01-12 10:12:13

⑴ How to cluster text with Python

How it works:
First, read the list of tourist spots from Tourist_spots_5A_BD.txt. Then launch the headless browser PhantomJS (Firefox works as a substitute) to open the link "http://ke..com/", locate the search input box via Selenium, type in a keyword such as "故宫" (the Forbidden City), and load the resulting page. Finally, walk the DOM tree to find the summary element and read its text. The core line is:
driver.find_elements_by_xpath("//div[@class='lemma-summary']/div")

PS: Selenium is mainly an automated-testing tool; for Python crawlers, open-source frameworks such as Scrapy are recommended.
# coding=utf-8
"""
Created on 2015-09-04 @author: Eastmount
"""

import time
import re
import os
import sys
import codecs
import shutil
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.support.ui as ui
from selenium.webdriver.common.action_chains import ActionChains

# Open PhantomJS (a raw string avoids backslash-escape surprises on Windows)
driver = webdriver.PhantomJS(executable_path=r"G:\phantomjs-1.9.1-windows\phantomjs.exe")
#driver = webdriver.Firefox()
wait = ui.WebDriverWait(driver, 10)

# Fetch the summary of one 5A tourist spot and write it to fileName
def getInfobox(entityName, fileName):
    try:
        # create paths and txt files
        print u'File name: ', fileName
        info = codecs.open(fileName, 'w', 'utf-8')

        # locate input; notes: 1. visit url as unicode 2. write files
        # Error: Message: Element not found in the cache -
        #   Perhaps the page has changed since it was looked up
        # Workaround: use Selenium together with PhantomJS
        print u'Entity name: ', entityName.rstrip('\n')
        driver.get("http://ke..com/")
        elem_inp = driver.find_element_by_xpath("//form[@id='searchForm']/input")
        elem_inp.send_keys(entityName)
        elem_inp.send_keys(Keys.RETURN)
        info.write(entityName.rstrip('\n') + '\r\n')  # codecs does not translate '\n'
        time.sleep(2)

        # load the summary ("lemma-summary") content
        elem_value = driver.find_elements_by_xpath("//div[@class='lemma-summary']/div")
        for value in elem_value:
            print value.text
            info.writelines(value.text + '\r\n')
        time.sleep(2)

    except Exception, e:  # e.g. 'utf8' codec can't decode byte
        print "Error: ", e
    finally:
        print '\n'
        info.close()

# Main function
def main():
    path = "BaiSpider\\"
    if os.path.isdir(path):
        shutil.rmtree(path, True)
    os.makedirs(path)
    source = open("Tourist_spots_5A_BD.txt", 'r')
    num = 1
    for entityName in source:
        entityName = unicode(entityName, "utf-8")
        if u'故宫' in entityName:  # else add a '?'
            entityName = u'北京故宫'
        name = "%04d" % num
        fileName = path + str(name) + ".txt"
        getInfobox(entityName, fileName)
        num = num + 1
    print 'End Read Files!'
    source.close()
    driver.close()

if __name__ == '__main__':
    main()
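The code above only collects the summaries; the clustering step itself is not shown. A hedged sketch of one common way to cluster the collected texts follows; the jieba segmentation, the sample texts, and the TF-IDF + KMeans pipeline are assumptions, not part of the original answer.

# A hedged sketch: cluster the crawled summaries with TF-IDF + KMeans.
# The jieba segmentation, the sample texts and the choice of KMeans are
# illustrative assumptions, not part of the original answer.
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans

docs = ["故宫是明清两代的皇家宫殿", "颐和园是清代的皇家园林",
        "九寨沟以彩池和瀑布闻名"]                   # e.g. the crawled summaries
segmented = [" ".join(jieba.cut(d)) for d in docs]  # space-join Chinese tokens
X = TfidfVectorizer().fit_transform(segmented)      # sparse TF-IDF matrix
labels = KMeans(n_clusters=2, n_init=10).fit_predict(X)
print(labels)  # cluster id of each summary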

⑵ How to draw a clustering dendrogram in Python

# -*- coding: utf-8 -*-
import math
import pylab as pl

# Dataset: values come in groups of three: watermelon id, density, sugar content
data = """
1,0.697,0.46,2,0.774,0.376,3,0.634,0.264,4,0.608,0.318,5,0.556,0.215,
6,0.403,0.237,7,0.481,0.149,8,0.437,0.211,9,0.666,0.091,10,0.243,0.267,
11,0.245,0.057,12,0.343,0.099,13,0.639,0.161,14,0.657,0.198,15,0.36,0.37,
16,0.593,0.042,17,0.719,0.103,18,0.359,0.188,19,0.339,0.241,20,0.282,0.257,
21,0.748,0.232,22,0.714,0.346,23,0.483,0.312,24,0.478,0.437,25,0.525,0.369,
26,0.751,0.489,27,0.532,0.472,28,0.473,0.376,29,0.725,0.445,30,0.446,0.459"""

# Data processing: dataset is a list of 30 (density, sugar) samples
a = data.split(',')
dataset = [(float(a[i]), float(a[i + 1])) for i in range(1, len(a) - 1, 3)]

# Euclidean distance between two tuples a and b
def dist(a, b):
    return math.sqrt(math.pow(a[0] - b[0], 2) + math.pow(a[1] - b[1], 2))

# Single linkage: minimum pairwise distance
def dist_min(Ci, Cj):
    return min(dist(i, j) for i in Ci for j in Cj)

# Complete linkage: maximum pairwise distance
def dist_max(Ci, Cj):
    return max(dist(i, j) for i in Ci for j in Cj)

# Average linkage: mean pairwise distance
def dist_avg(Ci, Cj):
    return sum(dist(i, j) for i in Ci for j in Cj) / (len(Ci) * len(Cj))

# Find the indices of the smallest off-diagonal entry of M
def find_Min(M):
    min = 1000
    x = 0; y = 0
    for i in range(len(M)):
        for j in range(len(M[i])):
            if i != j and M[i][j] < min:
                min = M[i][j]; x = i; y = j
    return (x, y, min)

# AGNES (agglomerative nesting) algorithm
def AGNES(dataset, dist, k):
    # initialize clusters C and distance matrix M
    C = []; M = []
    for i in dataset:
        Ci = []
        Ci.append(i)
        C.append(Ci)
    for i in C:
        Mi = []
        for j in C:
            Mi.append(dist(i, j))
        M.append(Mi)
    q = len(dataset)
    # merge the two closest clusters and rebuild M until k clusters remain
    while q > k:
        x, y, min = find_Min(M)
        C[x].extend(C[y])
        C.remove(C[y])
        M = []
        for i in C:
            Mi = []
            for j in C:
                Mi.append(dist(i, j))
            M.append(Mi)
        q -= 1
    return C

# Plot the clusters as a scatter plot
def draw(C):
    colValue = ['r', 'y', 'g', 'b', 'c', 'k', 'm']
    for i in range(len(C)):
        coo_X = []  # x coordinates
        coo_Y = []  # y coordinates
        for j in range(len(C[i])):
            coo_X.append(C[i][j][0])
            coo_Y.append(C[i][j][1])
        pl.scatter(coo_X, coo_Y, marker='x', color=colValue[i % len(colValue)], label=i)

    pl.legend(loc='upper right')
    pl.show()

C = AGNES(dataset, dist_avg, 3)
draw(C)
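Note that the question asks for a dendrogram, while the code above scatter-plots the final clusters. For an actual tree diagram, a minimal sketch using SciPy's hierarchical-clustering module on the same dataset list; the use of SciPy here is an assumption, not part of the original answer:

# A hedged alternative that draws a real dendrogram; assumes the `dataset`
# list built above is in scope.
from scipy.cluster.hierarchy import linkage, dendrogram
import matplotlib.pyplot as plt

Z = linkage(dataset, method='average')  # average linkage, matching dist_avg above
dendrogram(Z)                           # draw the merge tree
plt.show()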

⑶ How to cluster people's movement trajectories with Python

Convert your x, y coordinates into a one-hot encoding; then any clustering algorithm can handle them:
KMeans, DBSCAN, hierarchical clustering, and so on all work. A sketch of the idea follows.
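A minimal sketch of the idea, not the only way to do it: discretize the (x, y) points of each trajectory into grid cells, encode the visited cells as a one-hot style vector, then hand the vectors to any clusterer. The grid size, the sample trajectories, and the choice of KMeans are all assumptions.

# A hedged sketch: grid-cell one-hot encoding of trajectories, then KMeans.
import numpy as np
from sklearn.cluster import KMeans

def trajectory_to_vector(traj, grid=10):
    # traj: sequence of (x, y) pairs normalized into [0, 1)
    vec = np.zeros(grid * grid)
    for x, y in traj:
        vec[int(y * grid) * grid + int(x * grid)] = 1.0  # mark visited cell
    return vec

trajs = [[(0.10, 0.20), (0.15, 0.25)], [(0.80, 0.90), (0.85, 0.80)],
         [(0.12, 0.22), (0.20, 0.30)], [(0.90, 0.85), (0.80, 0.82)]]
X = np.array([trajectory_to_vector(t) for t in trajs])
print(KMeans(n_clusters=2, n_init=10).fit_predict(X))  # e.g. [0 1 0 1]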

⑷ How to do cluster analysis with Python

1. K-means clustering. The K-Means algorithm is simple in concept yet works well, and it is the best-known clustering algorithm. Its steps are:
1: initialize K samples as the initial cluster centers;
2: compute each sample's distance to the K centers and assign it to the nearest one, until every sample is assigned;
3: recompute the centroid of each of the K clusters as the new centers, completing one iteration.
The usual stopping condition is that the new centroids shift from the previous ones by less than a given threshold.
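These steps map directly onto scikit-learn's KMeans; a minimal sketch, assuming some synthetic 2-D data purely for illustration:

# A hedged scikit-learn sketch of the steps above; the blobs are made up.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5])  # two synthetic blobs
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X)
print(km.cluster_centers_)  # the centroids after the iterations converge
print(km.labels_[:10])      # cluster assignments of the first ten samples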

⑸ Python implementation of the k-means clustering algorithm: what does it require of the imported dataset?

1. How the K-Means clustering algorithm works
The k-means algorithm takes a parameter k and partitions the n input data objects into k clusters, such that objects within the same cluster are highly similar while objects in different clusters are not. Cluster similarity is measured against a "central object" (center of gravity) computed as the mean of the objects in each cluster.
K-means is the classic partition-based clustering method and one of the ten classic data-mining algorithms. Its basic idea: take k points in space as centers and assign each object to its nearest center; then iteratively update each center's value until the best clustering is obtained.
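On the dataset question itself: k-means implementations generally expect a purely numeric 2-D array (samples × features) with no missing values, ideally with features on comparable scales. A minimal preparation sketch, assuming a numeric CSV file named data.csv; the file name and the scaling step are assumptions:

# A hedged sketch of preparing a dataset for k-means.
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans

X = np.loadtxt("data.csv", delimiter=",")  # shape (n_samples, n_features), all numeric
X = StandardScaler().fit_transform(X)      # put features on comparable scales
labels = KMeans(n_clusters=3, n_init=10).fit_predict(X)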

⑹ How to implement subtractive clustering in Python

Below is a concrete implementation of the k-means clustering algorithm on Python 2.7.5; you need NumPy and Matplotlib installed first:
from numpy import *
import time
import matplotlib.pyplot as plt

# calculate Euclidean distance
def euclDistance(vector1, vector2):
    return sqrt(sum(power(vector2 - vector1, 2)))

# init centroids with random samples
def initCentroids(dataSet, k):
    numSamples, dim = dataSet.shape
    centroids = zeros((k, dim))
    for i in range(k):
        index = int(random.uniform(0, numSamples))
        centroids[i, :] = dataSet[index, :]
    return centroids

# k-means cluster
def kmeans(dataSet, k):
    numSamples = dataSet.shape[0]
    # first column stores which cluster this sample belongs to,
    # second column stores the error between this sample and its centroid
    clusterAssment = mat(zeros((numSamples, 2)))
    clusterChanged = True

    ## step 1: init centroids
    centroids = initCentroids(dataSet, k)

    while clusterChanged:
        clusterChanged = False
        ## for each sample
        for i in xrange(numSamples):
            minDist = 100000.0
            minIndex = 0
            ## for each centroid
            ## step 2: find the centroid who is closest
            for j in range(k):
                distance = euclDistance(centroids[j, :], dataSet[i, :])
                if distance < minDist:
                    minDist = distance
                    minIndex = j

            ## step 3: update its cluster
            if clusterAssment[i, 0] != minIndex:
                clusterChanged = True
                clusterAssment[i, :] = minIndex, minDist**2

        ## step 4: update centroids
        for j in range(k):
            pointsInCluster = dataSet[nonzero(clusterAssment[:, 0].A == j)[0]]
            centroids[j, :] = mean(pointsInCluster, axis = 0)

    print 'Congratulations, cluster complete!'
    return centroids, clusterAssment

# show your cluster only available with 2-D data
def showCluster(dataSet, k, centroids, clusterAssment):
    numSamples, dim = dataSet.shape
    if dim != 2:
        print "Sorry! I can not draw because the dimension of your data is not 2!"
        return 1

    mark = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '<r', 'pr']
    if k > len(mark):
        print "Sorry! Your k is too large! please contact Zouxy"
        return 1

    # draw all samples
    for i in xrange(numSamples):
        markIndex = int(clusterAssment[i, 0])
        plt.plot(dataSet[i, 0], dataSet[i, 1], mark[markIndex])

    mark = ['Dr', 'Db', 'Dg', 'Dk', '^b', '+b', 'sb', 'db', '<b', 'pb']
    # draw the centroids
    for i in range(k):
        plt.plot(centroids[i, 0], centroids[i, 1], mark[i], markersize = 12)

    plt.show()
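A short usage sketch for the functions above (Python 2, like the code itself); the synthetic test data are an assumption, since the original answer ships no data:

# Usage sketch; assumes the definitions above are in scope.
dataSet = mat(vstack((random.rand(40, 2), random.rand(40, 2) + 2)))  # two blobs
centroids, clusterAssment = kmeans(dataSet, 2)
showCluster(dataSet, 2, centroids, clusterAssment)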

⑺ How to import data for a k-means clustering algorithm in Python 2.7

Specify the file name (it is loaded by loadDataSet below).
Problem description: given a pile of 2-D data points, cluster them with the k-means algorithm; the example below uses k=3.
Raw data:
1.5,3.1
2.2,2.9
3,4
2,1
15,25
43,13
32,42
0,0
8,9
12,5
9,12
11,8
22,33
24,25

Implementation code:

#coding:utf-8
from numpy import *
import string
import math

def loadDataSet(filename):
    dataMat = []
    fr = open(filename)
    for line in fr.readlines():
        element = line.strip('\n').split(',')
        number = []
        for i in range(len(element)):
            number.append(string.atof(element[i]))
        dataMat.append(number)
    return dataMat

def distEclud(vecA, vecB):
    count = len(vecA)
    s = 0.0
    for i in range(0, count):
        s = s + power(vecA[i] - vecB[i], 2)
    return sqrt(s)

# index of the mean that is closest to element
def clusterOfElement(means, element):
    min_dist = distEclud(means[0], element)
    lable = 0
    for index in range(1, len(means)):
        dist = distEclud(means[index], element)
        if(dist < min_dist):
            min_dist = dist
            lable = index
    return lable

# cluster: the list of points of one cluster, e.g. [[1,2],[1,2],[1,2]]
def getMean(cluster):
    num = len(cluster)  # number of points in this cluster, 3 in the example above
    res = []
    dim = len(cluster[0])
    for i in range(0, dim):
        temp = 0  # reset the accumulator for each dimension
        for j in range(0, num):
            temp = temp + cluster[j][i]
        temp = temp / num
        res.append(temp)
    return res

def kMeans():
    k = 3
    data = loadDataSet('data.txt')
    print "data is ", data
    inite_mean = [[1.1, 1], [1, 1], [1, 2]]

    count = 0
    while(count < 1000):
        count = count + 1
        clusters = []
        means = []
        for i in range(k):
            clusters.append([])
            means.append([])

        # assign every point to its nearest current mean
        for index in range(len(data)):
            lable = clusterOfElement(inite_mean, data[index])
            clusters[lable].append(data[index])

        # recompute the mean of every cluster
        for cluster_index in range(k):
            mea = getMean(clusters[cluster_index])
            for mean_dim in range(len(mea)):
                means[cluster_index].append(mea[mean_dim])

        for mm in range(len(means)):
            for mmm in range(len(means[mm])):
                inite_mean[mm][mmm] = means[mm][mmm]

    print "result cluster is ", clusters
    print "result means is ", inite_mean

kMeans()

⑻ Does Python clustering give the same result every time?

In Python you can use pdf_multivariate to obtain a multivariate density distribution and then draw a 3-D surface with plot_surface; MATLAB can do the same.

⑼ How to apply hierarchical clustering and K-means in Python code for cluster analysis, then select variables and build an appropriate model

K-Means clustering algorithm
The k-means algorithm takes k as a parameter and divides n objects into k clusters, so that similarity within a cluster is high while similarity between clusters is low.

1. Randomly choose k points as the initial cluster centers.
2. Assign each remaining point to the nearest cluster according to its distance to the cluster centers.
3. For each cluster, compute the mean of all its points as the new cluster center.
4. Repeat steps 2 and 3 until the cluster centers no longer change.


Applying K-means
About the data:
We have 1999 data on the average annual per-capita consumption expenditure of urban households in all 31 Chinese provinces, across eight major variables: food; clothing; household appliances and services; health care; transportation and communications; education, culture and recreation services; housing; and miscellaneous goods and services. Using these data, cluster the 31 provinces.

Goal:
Use clustering to understand each province's 1999 consumption level relative to the rest of the country.

Technical route:
sklearn.cluster.KMeans

Data example:
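The data example itself is not reproduced in the answer. As a hedged sketch of the sklearn.cluster.KMeans route named above, assuming each line of a file city.txt holds a province name followed by the eight expenditure figures (the file name and layout are assumptions):

# A hedged sketch of the KMeans route; "city.txt" and its layout are assumed.
import numpy as np
from sklearn.cluster import KMeans

names, rows = [], []
for line in open("city.txt", encoding="utf-8"):
    parts = line.strip().split(",")
    names.append(parts[0])                      # province name
    rows.append([float(v) for v in parts[1:]])  # the eight expenditure figures

km = KMeans(n_clusters=4)  # e.g. four spending tiers
labels = km.fit_predict(np.array(rows))
for c in range(4):
    print(c, [names[i] for i in range(len(names)) if labels[i] == c])

For the hierarchical-clustering half of the question, sklearn.cluster.AgglomerativeClustering can be swapped in for KMeans on the same matrix.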

⑽ After clustering data in Python, how to display the classification of the data

Organize it into the following dataset:
[ [1,0,"yes"],[1,1,"yes"],[0,1,"yes"],[0,0,"no"],[1,0,"no"] ]
Algorithm steps:

1. Compute the entropy of the original dataset.
2. Compute, in turn, the entropy for each feature of every sample in the dataset.
3. Compare the entropy values of the different features and output the feature with the largest value.
Run results:
col : 0 curInfoGain : 2.37744375108 baseInfoGain : 0.0
col : 1 curInfoGain : 1.37744375108 baseInfoGain : 2.37744375108
bestInfoGain : 2.37744375108 bestFeature: 0
Result analysis:
This shows that splitting on the first column, i.e. the presence of an Adam's apple, classifies the samples better.
Things to think about:
1. Can the decision-tree algorithm output the final classification of each sample, e.g. samples 1, 2, 3 are male and sample 4 is female?

2. The example program builds a decision tree with only one level; as the number of features grows, how can a tree with multiple levels be generated?
3. How should the quality of the classification be judged?
I will analyze and answer these three questions in the next article. If you are interested, you are welcome to subscribe to my articles or comment below; if you have questions or think something is wrong, leave a message and I will do my best to respond.
The complete code is as follows:
from math import log

"""
Compute the information entropy of a dataset
"""
def calcEntropy(dataset):
    diclabel = {}  # label dictionary: counts the occurrences of each class label
    for record in dataset:
        label = record[-1]
        if label not in diclabel.keys():
            diclabel[label] = 0
        diclabel[label] += 1
    # compute the entropy
    entropy = 0.0
    cnt = len(dataset)
    for label in diclabel.keys():
        prob = float(1.0 * diclabel[label] / cnt)
        entropy -= prob * log(prob, 2)
    return entropy

def initDataSet():
    dataset = [[1,0,"yes"],[1,1,"yes"],[0,1,"yes"],[0,0,"no"],[1,0,"no"]]
    label = ["male","female"]
    return dataset,label

# Split the dataset: keep the records whose column `col` equals `value`,
# dropping that column, to form a new dataset
def splitDataset(dataset, col, value):
    retset = []  # the split dataset
    for record in dataset:
        if record[col] == value:
            recedFeatVec = record[:col]
            recedFeatVec.extend(record[col+1:])  # remove the selected column
            retset.append(recedFeatVec)          # append the new feature list
    return retset

# Find the feature with the largest information-entropy gain
# Parameter:
#   dataset : the original dataset
def findBestFeature(dataset):
    numFeatures = len(dataset[0]) - 1   # number of features
    baseEntropy = calcEntropy(dataset)  # entropy of the original dataset
    baseInfoGain = 0.0                  # initial information gain
    bestFeature = -1                    # initial index of the best feature
    # compute the entropy for each feature
    for col in range(numFeatures):
        features = [record[col] for record in dataset]  # the column vector; e.g. col=0 gives [1,1,0,0,1]
        uniqueFeat = set(features)
        curInfoGain = 0  # gain obtained by splitting on this column
        for featVal in uniqueFeat:
            subDataset = splitDataset(dataset, col, featVal)  # split on value featVal of column col
            prob = 1.0 * len(subDataset) / numFeatures        # proportion of the sub-dataset
            curInfoGain += prob * calcEntropy(subDataset)     # gain contributed by featVal of column col
            # print "col : ", col, " featVal : ", featVal, " curInfoGain :", curInfoGain, " baseInfoGain : ", baseInfoGain
        print "col : ", col, " curInfoGain :", curInfoGain, " baseInfoGain : ", baseInfoGain
        if curInfoGain > baseInfoGain:
            baseInfoGain = curInfoGain
            bestFeature = col
    return baseInfoGain, bestFeature  # the largest gain and the column that yields it

dataset, label = initDataSet()
infogain, bestFeature = findBestFeature(dataset)
print "bestInfoGain :", infogain, " bestFeature:", bestFeature
