Weka Source Code Download

Published: 2022-06-14 15:45:50

㈠ Looking for the Java source of Weka's ID3 algorithm

/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* Id3.java
* Copyright (C) 1999 University of Waikato, Hamilton, New Zealand
*
*/
package weka.classifiers.trees;
import weka.classifiers.Classifier;
import weka.classifiers.Sourcable;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.NoSupportForMissingValuesException;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import java.util.Enumeration;
/**
<!-- globalinfo-start -->
* Class for constructing an unpruned decision tree based on the ID3 algorithm. Can only deal with nominal attributes. No missing values allowed. Empty leaves may result in unclassified instances. For more information see: <br/>
* <br/>
* R. Quinlan (1986). Induction of decision trees. Machine Learning. 1(1):81-106.
* <p/>
<!-- globalinfo-end -->
*
<!-- technical-bibtex-start -->
* BibTeX:
* <pre>
* &#64;article{Quinlan1986,
* author = {R. Quinlan},
* journal = {Machine Learning},
* number = {1},
* pages = {81-106},
* title = {Induction of decision trees},
* volume = {1},
* year = {1986}
* }
* </pre>
* <p/>
<!-- technical-bibtex-end -->
*
<!-- options-start -->
* Valid options are: <p/>
*
* <pre> -D
* If set, classifier is run in debug mode and
* may output additional info to the console</pre>
*
<!-- options-end -->
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision: 6404 $
*/
public class Id3
extends Classifier
implements TechnicalInformationHandler, Sourcable {
/** for serialization */
static final long serialVersionUID = -2693678647096322561L;
/** The node's successors. */
private Id3[] m_Successors;
/** Attribute used for splitting. */
private Attribute m_Attribute;
/** Class value if node is leaf. */
private double m_ClassValue;
/** Class distribution if node is leaf. */
private double[] m_Distribution;
/** Class attribute of dataset. */
private Attribute m_ClassAttribute;
/**
* Returns a string describing the classifier.
* @return a description suitable for the GUI.
*/
public String globalInfo() {
return "Class for constructing an unpruned decision tree based on the ID3 "
+ "algorithm. Can only deal with nominal attributes. No missing values "
+ "allowed. Empty leaves may result in unclassified instances. For more "
+ "information see: "
+ getTechnicalInformation().toString();
}
/**
* Returns an instance of a TechnicalInformation object, containing
* detailed information about the technical background of this class,
* e.g., paper reference or book this class is based on.
*
* @return the technical information about this class
*/
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(Type.ARTICLE);
result.setValue(Field.AUTHOR, "R. Quinlan");
result.setValue(Field.YEAR, "1986");
result.setValue(Field.TITLE, "Induction of decision trees");
result.setValue(Field.JOURNAL, "Machine Learning");
result.setValue(Field.VOLUME, "1");
result.setValue(Field.NUMBER, "1");
result.setValue(Field.PAGES, "81-106");
return result;
}
/**
* Returns default capabilities of the classifier.
*
* @return the capabilities of this classifier
*/
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities();
result.disableAll();
// attributes
result.enable(Capability.NOMINAL_ATTRIBUTES);
// class
result.enable(Capability.NOMINAL_CLASS);
result.enable(Capability.MISSING_CLASS_VALUES);
// instances
result.setMinimumNumberInstances(0);
return result;
}
/**
* Builds Id3 decision tree classifier.
*
* @param data the training data
* @exception Exception if classifier can't be built successfully
*/
public void buildClassifier(Instances data) throws Exception {
// can classifier handle the data?
getCapabilities().testWithFail(data);
// remove instances with missing class
data = new Instances(data);
data.deleteWithMissingClass();
makeTree(data);
}
/**
* Method for building an Id3 tree.
*
* @param data the training data
* @exception Exception if decision tree can't be built successfully
*/
private void makeTree(Instances data) throws Exception {
// Check if no instances have reached this node.
if (data.numInstances() == 0) {
m_Attribute = null;
m_ClassValue = Instance.missingValue();
m_Distribution = new double[data.numClasses()];
return;
}
// Compute attribute with maximum information gain.
double[] infoGains = new double[data.numAttributes()];
Enumeration attEnum = data.enumerateAttributes();
while (attEnum.hasMoreElements()) {
Attribute att = (Attribute) attEnum.nextElement();
infoGains[att.index()] = computeInfoGain(data, att);
}
m_Attribute = data.attribute(Utils.maxIndex(infoGains));
// Make leaf if information gain is zero.
// Otherwise create successors.
if (Utils.eq(infoGains[m_Attribute.index()], 0)) {
m_Attribute = null;
m_Distribution = new double[data.numClasses()];
Enumeration instEnum = data.enumerateInstances();
while (instEnum.hasMoreElements()) {
Instance inst = (Instance) instEnum.nextElement();
m_Distribution[(int) inst.classValue()]++;
}
Utils.normalize(m_Distribution);
m_ClassValue = Utils.maxIndex(m_Distribution);
m_ClassAttribute = data.classAttribute();
} else {
Instances[] splitData = splitData(data, m_Attribute);
m_Successors = new Id3[m_Attribute.numValues()];
for (int j = 0; j < m_Attribute.numValues(); j++) {
m_Successors[j] = new Id3();
m_Successors[j].makeTree(splitData[j]);
}
}
}
/**
* Classifies a given test instance using the decision tree.
*
* @param instance the instance to be classified
* @return the classification
* @throws NoSupportForMissingValuesException if instance has missing values
*/
public double classifyInstance(Instance instance)
throws NoSupportForMissingValuesException {
if (instance.hasMissingValue()) {
throw new NoSupportForMissingValuesException("Id3: no missing values, "
+ "please.");
}
if (m_Attribute == null) {
return m_ClassValue;
} else {
return m_Successors[(int) instance.value(m_Attribute)].
classifyInstance(instance);
}
}
/**
* Computes class distribution for instance using decision tree.
*
* @param instance the instance for which distribution is to be computed
* @return the class distribution for the given instance
* @throws NoSupportForMissingValuesException if instance has missing values
*/
public double[] distributionForInstance(Instance instance)
throws NoSupportForMissingValuesException {
if (instance.hasMissingValue()) {
throw new NoSupportForMissingValuesException("Id3: no missing values, "
+ "please.");
}
if (m_Attribute == null) {
return m_Distribution;
} else {
return m_Successors[(int) instance.value(m_Attribute)].
distributionForInstance(instance);
}
}
/**
* Prints the decision tree using the private toString method from below.
*
* @return a textual description of the classifier
*/
public String toString() {
if ((m_Distribution == null) && (m_Successors == null)) {
return "Id3: No model built yet.";
}
return "Id3\n\n" + toString(0);
}
/**
* Computes information gain for an attribute.
*
* @param data the data for which info gain is to be computed
* @param att the attribute
* @return the information gain for the given attribute and data
* @throws Exception if computation fails
*/
private double computeInfoGain(Instances data, Attribute att)
throws Exception {
double infoGain = computeEntropy(data);
Instances[] splitData = splitData(data, att);
for (int j = 0; j < att.numValues(); j++) {
if (splitData[j].numInstances() > 0) {
infoGain -= ((double) splitData[j].numInstances() /
(double) data.numInstances()) *
computeEntropy(splitData[j]);
}
}
return infoGain;
}
/**
* Computes the entropy of a dataset.
*
* @param data the data for which entropy is to be computed
* @return the entropy of the data's class distribution
* @throws Exception if computation fails
*/
private double computeEntropy(Instances data) throws Exception {
double [] classCounts = new double[data.numClasses()];
Enumeration instEnum = data.enumerateInstances();
while (instEnum.hasMoreElements()) {
Instance inst = (Instance) instEnum.nextElement();
classCounts[(int) inst.classValue()]++;
}
double entropy = 0;
for (int j = 0; j < data.numClasses(); j++) {
if (classCounts[j] > 0) {
entropy -= classCounts[j] * Utils.log2(classCounts[j]);
}
}
entropy /= (double) data.numInstances();
return entropy + Utils.log2(data.numInstances());
}
/**
* Splits a dataset according to the values of a nominal attribute.
*
* @param data the data which is to be split
* @param att the attribute to be used for splitting
* @return the sets of instances produced by the split
*/
private Instances[] splitData(Instances data, Attribute att) {
Instances[] splitData = new Instances[att.numValues()];
for (int j = 0; j < att.numValues(); j++) {
splitData[j] = new Instances(data, data.numInstances());
}
Enumeration instEnum = data.enumerateInstances();
while (instEnum.hasMoreElements()) {
Instance inst = (Instance) instEnum.nextElement();
splitData[(int) inst.value(att)].add(inst);
}
for (int i = 0; i < splitData.length; i++) {
splitData[i].compactify();
}
return splitData;
}
/**
* Outputs a tree at a certain level.
*
* @param level the level at which the tree is to be printed
* @return the tree as string at the given level
*/
private String toString(int level) {
StringBuffer text = new StringBuffer();
if (m_Attribute == null) {
if (Instance.isMissingValue(m_ClassValue)) {
text.append(": null");
} else {
text.append(": " + m_ClassAttribute.value((int) m_ClassValue));
}
} else {
for (int j = 0; j < m_Attribute.numValues(); j++) {
text.append("\n");
for (int i = 0; i < level; i++) {
text.append("|  ");
}
text.append(m_Attribute.name() + " = " + m_Attribute.value(j));
text.append(m_Successors[j].toString(level + 1));
}
}
return text.toString();
}
/**
* Adds this tree recursively to the buffer.
*
* @param id the unique id for the method
* @param buffer the buffer to add the source code to
* @return the last ID being used
* @throws Exception if something goes wrong
*/
protected int toSource(int id, StringBuffer buffer) throws Exception {
int result;
int i;
int newID;
StringBuffer[] subBuffers;
buffer.append("\n");
buffer.append("  protected static double node" + id + "(Object[] i) {\n");
// leaf?
if (m_Attribute == null) {
result = id;
if (Double.isNaN(m_ClassValue)) {
buffer.append("    return Double.NaN;");
} else {
buffer.append("    return " + m_ClassValue + ";");
}
if (m_ClassAttribute != null) {
buffer.append(" // " + m_ClassAttribute.value((int) m_ClassValue));
}
buffer.append("\n");
buffer.append("  }\n");
} else {
buffer.append("    checkMissing(i, " + m_Attribute.index() + ");\n\n");
buffer.append("    // " + m_Attribute.name() + "\n");
// subtree calls
subBuffers = new StringBuffer[m_Attribute.numValues()];
newID = id;
for (i = 0; i < m_Attribute.numValues(); i++) {
newID++;
buffer.append("    ");
if (i > 0) {
buffer.append("else ");
}
buffer.append("if (((String) i[" + m_Attribute.index()
+ "]).equals(\"" + m_Attribute.value(i) + "\"))\n");
buffer.append("      return node" + newID + "(i);\n");
subBuffers[i] = new StringBuffer();
newID = m_Successors[i].toSource(newID, subBuffers[i]);
}
buffer.append("    else\n");
buffer.append("      throw new IllegalArgumentException(\"Value '\" + i["
+ m_Attribute.index() + "] + \"' is not allowed!\");\n");
buffer.append("  }\n");
// output subtree code
for (i = 0; i < m_Attribute.numValues(); i++) {
buffer.append(subBuffers[i].toString());
}
subBuffers = null;
result = newID;
}
return result;
}
/**
* Returns a string that describes the classifier as source. The
* classifier will be contained in a class with the given name (there may
* be auxiliary classes),
* and will contain a method with the signature:
* <pre><code>
* public static double classify(Object[] i);
* </code></pre>
* where the array <code>i</code> contains elements that are either
* Double, String, with missing values represented as null. The generated
* code is public domain and comes with no warranty. <br/>
* Note: works only if class attribute is the last attribute in the dataset.
*
* @param className the name that should be given to the source class.
* @return the object source described by a string
* @throws Exception if the source can't be computed
*/
public String toSource(String className) throws Exception {
StringBuffer result;
int id;
result = new StringBuffer();
result.append("class " + className + " {\n");
result.append("  private static void checkMissing(Object[] i, int index) {\n");
result.append("    if (i[index] == null)\n");
result.append("      throw new IllegalArgumentException(\"Null values "
+ "are not allowed!\");\n");
result.append("  }\n\n");
result.append("  public static double classify(Object[] i) {\n");
id = 0;
result.append("    return node" + id + "(i);\n");
result.append("  }\n");
toSource(id, result);
result.append("}\n");
return result.toString();
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision: 6404 $");
}
/**
* Main method.
*
* @param args the options for the classifier
*/
public static void main(String[] args) {
runClassifier(new Id3(), args);
}
}
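A minimal usage sketch (not part of the Weka source above; the ARFF path is an assumption, though weather.nominal.arff does ship with Weka's data folder) showing how to train this Id3 class and classify an instance:

import java.io.FileReader;

import weka.classifiers.trees.Id3;
import weka.core.Instance;
import weka.core.Instances;

public class Id3Demo {
  public static void main(String[] args) throws Exception {
    // ID3 handles nominal attributes only, so use a nominal dataset.
    Instances data = new Instances(new FileReader("data/weather.nominal.arff"));
    data.setClassIndex(data.numAttributes() - 1); // class is the last attribute

    Id3 tree = new Id3();
    tree.buildClassifier(data);
    System.out.println(tree); // prints the tree via toString()

    // Classify the first training instance.
    Instance first = data.instance(0);
    double label = tree.classifyInstance(first);
    System.out.println("Predicted: " + data.classAttribute().value((int) label));
  }
}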

㈡ Where can I download the Weka software?

http://www.cs.waikato.ac.nz/ml/weka/

㈢ The C4.5 algorithm: where can I download the source code of a Windows version of C4.5? Many thanks~~

Yes, Weka is open source. As I recall, the version I used had a weka.jar in the bin folder of the installation directory, and the Java source ships alongside it as weka-src.jar; unzip that and you get the source. Weka's C4.5 implementation is J48, at roughly this path: weka.classifiers.trees.J48

You can import the whole weka.jar into a development environment like Eclipse as an external library; that makes it convenient both to read the source and to do secondary development with the Weka API.
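As a rough sketch of that kind of secondary development (the dataset path is an assumption), training and cross-validating J48 through the Weka API looks like this:

import java.io.FileReader;

import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;

public class J48Demo {
  public static void main(String[] args) throws Exception {
    Instances data = new Instances(new FileReader("iris.arff"));
    data.setClassIndex(data.numAttributes() - 1);

    J48 tree = new J48(); // Weka's C4.5 implementation
    tree.buildClassifier(data);

    // 10-fold cross-validation, as the Explorer GUI would run it.
    Evaluation eval = new Evaluation(data);
    eval.crossValidateModel(tree, data, 10, new java.util.Random(1));
    System.out.println(eval.toSummaryString());
  }
}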

㈣ The visualization interface for Weka's clusterers: which package in Weka holds its source code?

The best pointer I can give you is this path: ClustererPanel.class under the weka.gui.explorer package. As for how to extract it, set Explorer.class in that package as the launch target of a debug session and work it out from there yourself.
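If it helps, one way to reach that code in a debugger is to launch the Explorer programmatically and set breakpoints in ClustererPanel; a minimal sketch:

public class ExplorerLauncher {
  public static void main(String[] args) {
    // Starts the Weka Explorer GUI; breakpoints set in
    // weka.gui.explorer.ClustererPanel are hit once you open the Cluster tab.
    weka.gui.explorer.Explorer.main(new String[0]);
  }
}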

㈤ What software should I use for data mining? Looking for software names plus download links!

Weka is a good open-source data-mining package written in Java. Many of its algorithms can be used directly, and you can also build on its Java packages for secondary development. Its official website is http://www.cs.waikato.ac.nz/ml/weka/, where you can download the latest Weka.
SQL Server is also a decent data-mining tool, but apparently only the Developer edition has that feature, so you would have to track down the Developer edition; VeryCD has plenty of downloads. Hope this helps.

㈥ I installed Weka 3.6.8 and JDK 1.7; when I run Weka it reports "class not found, program will exit"

Setup:
First download the Weka development package. The installation directory contains a weka-src.jar (the source code) and a weka.jar (the executables). Unzip weka-src.jar and drag the \weka-src\src\main\java\weka folder into the src directory of your weka project. Unzip weka.jar and copy the \weka\weka folder into the bin folder under the project's directory on disk. Then add weka_javacodes.jar to the project's Build Path. Finally, write and run your code in MyEclipse or Eclipse.
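Once the project is set up, a quick sanity check (a sketch; the class name WekaCheck is arbitrary) tells you whether Eclipse actually sees the Weka classes, which is exactly what the "class not found" error is complaining about:

public class WekaCheck {
  public static void main(String[] args) throws Exception {
    // Throws ClassNotFoundException if Weka is not on the build path.
    Class<?> c = Class.forName("weka.core.Instances");
    System.out.println("Found " + c.getName() + " -- Weka is on the classpath.");
  }
}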

㈦ I have recently started learning data mining; is there a source-code implementation of a clustering algorithm based on Weka?

I have only just started with data mining myself and am studying k-means. Because the clustering result shifts in unreasonable ways as the randomly chosen k-means centers change, I am trying to pin the initial centers down.
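One way to get reproducible runs without hand-picking centers: Weka's SimpleKMeans draws its initial centroids from a seeded random number generator, so fixing the seed fixes the starting centers. A sketch, with the dataset path as an assumption:

import java.io.FileReader;

import weka.clusterers.SimpleKMeans;
import weka.core.Instances;

public class KMeansDemo {
  public static void main(String[] args) throws Exception {
    // No class index is set, so all attributes are used for clustering.
    Instances data = new Instances(new FileReader("iris.arff"));

    SimpleKMeans km = new SimpleKMeans();
    km.setNumClusters(3);
    km.setSeed(42); // same seed => same initial centroids => same clustering
    km.buildClusterer(data);

    System.out.println(km);
  }
}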

㈧ Can Weka filter instances?

It certainly can. I am not sure whether you are using Weka for programmatic development or only using the GUI for data mining.

If you are using the GUI, you pick a Filter to filter instances (that is, to preprocess the raw data). Filters come in supervised and unsupervised variants; choose one according to your needs, then click the filter's text box to set its parameters and rules.

If you are developing against Weka, http://weka.sourceforge.net/doc.stable/ is the Weka API documentation. There you can find the weka.filters package; for the specifics of applying each filter, just read the API.

If choosing a filter in the GUI leaves you dizzy and you cannot decide which one to use, go look at the API too; the explanations are decent. If that still does not work, download the Weka source and read the comments, though they are all in English.
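As a small sketch of the weka.filters API (the particular filter and dataset path are just examples), the usual pattern is: configure the filter, hand it the input format, then push the data through Filter.useFilter:

import java.io.FileReader;

import weka.core.Instances;
import weka.filters.Filter;
import weka.filters.unsupervised.instance.RemovePercentage;

public class FilterDemo {
  public static void main(String[] args) throws Exception {
    Instances data = new Instances(new FileReader("iris.arff"));

    // Unsupervised instance filter: drop 30% of the instances.
    RemovePercentage rp = new RemovePercentage();
    rp.setPercentage(30);
    rp.setInputFormat(data); // must be called before useFilter

    Instances filtered = Filter.useFilter(data, rp);
    System.out.println("Before: " + data.numInstances()
        + ", after: " + filtered.numInstances());
  }
}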

㈨ While setting up Weka, why does importing the source code into my installed Eclipse keep failing?

First unzip weka-src. Inside it, find a folder named weka (there are several, so watch out); locate the one that contains all the runnable files and drag it straight into the src directory of your project. That is all it takes.

㈩ Where did you download Weka, and how do you like it?

From Weka's official website. The Weka source code is open and it is pure Java, well suited to research and experiments. It is quite practical: you do not have to code the algorithms yourself, just call the functions inside.
