https://github.com/intohole/moodstyle
moodstyle machine learning script library
Machine learning algorithms in pure Python, written so that programmers can learn them easily.
Please push your code to this project; let's learn together.
- AdBoost.py
- AdTree.py
- Classifier(object)
- AdTree(object)
- __init__(self)
- train(self, datas, weights, classifiers, diff=0.2)
- find_min_loss(self, datas, residuals, classifiers)
- update_residual(self, datas, residuals, classifier)
- classify(self, data)
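
The method names in AdTree.py (training over weighted classifiers, find_min_loss, update_residual) read like an additive model of weak learners fitted to residuals. The snippet below is only a rough standalone sketch of that idea using one-feature threshold stumps; every name in it is hypothetical and it is not the code in AdTree.py.

```python
def fit_stump(xs, residuals):
    """Pick the threshold that minimizes squared loss on the current residuals.
    Assumes at least two distinct feature values."""
    best = None
    for split in sorted(set(xs)):
        left = [r for x, r in zip(xs, residuals) if x <= split]
        right = [r for x, r in zip(xs, residuals) if x > split]
        if not left or not right:
            continue
        lm = sum(left) / float(len(left))
        rm = sum(right) / float(len(right))
        loss = sum((r - lm) ** 2 for r in left) + sum((r - rm) ** 2 for r in right)
        if best is None or loss < best[0]:
            best = (loss, split, lm, rm)
    return best[1:]  # (split_value, left_mean, right_mean)


def train_additive(xs, ys, rounds=5):
    """Fit stumps one after another, each on the residuals left by the previous ones."""
    residuals = list(ys)
    model = []
    for _ in range(rounds):
        split, lm, rm = fit_stump(xs, residuals)
        model.append((split, lm, rm))
        residuals = [r - (lm if x <= split else rm) for x, r in zip(xs, residuals)]
    return model


def classify(model, x):
    """Sum the stump outputs to get the additive prediction."""
    return sum(lm if x <= split else rm for split, lm, rm in model)
```

For example, train_additive([1, 2, 3, 4], [1.0, 1.2, 2.9, 3.1]) builds a few stumps whose summed outputs approximate the targets.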
- Ann.py
- Ann(testInterface.Classify)
- __init__(self, w, learn_rate=0.1, labels=[1, -1])
- __train(self, data, label)
- classify(self, data, *argv, **kw)
- train(self, datas, labels, *argv, **kw)
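
Ann.py's constructor arguments (w, learn_rate=0.1, labels=[1, -1]) suggest a single-unit, perceptron-style classifier. A minimal sketch of such a training loop follows; the class name and the exact shape of the update are my assumptions, not the file's actual implementation.

```python
class Perceptron(object):
    """Single-layer perceptron with labels in {+1, -1}."""

    def __init__(self, n_features, learn_rate=0.1):
        self.w = [0.0] * n_features
        self.b = 0.0
        self.learn_rate = learn_rate

    def classify(self, data):
        s = sum(wi * xi for wi, xi in zip(self.w, data)) + self.b
        return 1 if s >= 0 else -1

    def train(self, datas, labels, epochs=10):
        for _ in range(epochs):
            for data, label in zip(datas, labels):
                if self.classify(data) != label:  # update only on mistakes
                    self.w = [wi + self.learn_rate * label * xi
                              for wi, xi in zip(self.w, data)]
                    self.b += self.learn_rate * label
```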
- Array.py
- Bandit.py
- BaseStrut.py
- Bayes.py
- Bayes(object)
- train(self, datas, attr_len, labels, dense=True)
- _predict(self, data, label)
- get_prob(self, attr_index, value, label)
- predict(self, data)
- predict_old(self, data)
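
Bayes.py exposes the usual naive Bayes surface (train, get_prob, predict). The self-contained sketch below illustrates that algorithm for discrete features; the Laplace smoothing and the class name are mine, so this is not the file's code.

```python
import math
from collections import defaultdict


class NaiveBayes(object):
    """Count-based training and log-probability prediction for discrete features."""

    def train(self, datas, labels):
        self.label_counts = defaultdict(int)
        self.value_counts = defaultdict(int)  # (label, attr_index, value) -> count
        for data, label in zip(datas, labels):
            self.label_counts[label] += 1
            for i, value in enumerate(data):
                self.value_counts[(label, i, value)] += 1

    def get_prob(self, attr_index, value, label):
        # Laplace-smoothed estimate of P(value | label)
        return (self.value_counts[(label, attr_index, value)] + 1.0) / \
               (self.label_counts[label] + 2.0)

    def predict(self, data):
        total = float(sum(self.label_counts.values()))
        best_label, best_score = None, float("-inf")
        for label, count in self.label_counts.items():
            score = math.log(count / total)  # log prior
            for i, value in enumerate(data):
                score += math.log(self.get_prob(i, value, label))
            if score > best_score:
                best_label, best_score = label, score
        return best_label
```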
- Bp.py
- Layer(object)
- RatioArray(object)
- Bp(object)
- rand(a, b)
- makeMatrix(I, J, fill=0.0)
- sigmoid(x)
- dsigmoid(y)
- NN
- __init__(self, ni, nh, no)
- update(self, inputs)
- backPropagate(self, targets, N, M)
- test(self, patterns)
- weights(self)
- train(self, patterns, iterations=1000, N=0.5, M=0.1)
- demo()
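
Bp.py follows the classic pure-Python backpropagation demo layout (rand, makeMatrix, sigmoid/dsigmoid, an NN class with update/backPropagate/train and a demo()); Bp1.py below revisits the same algorithm at the neuron level. To make the dsigmoid(y) convention concrete, here is a sketch of the two activation helpers plus one gradient step for a single unit, assuming the logistic activation; the file itself may use tanh as its "sigmoid", and its NN class is a full two-layer network that is not reproduced here.

```python
import math


def sigmoid(x):
    # logistic activation (assumption: some bpnn-style scripts use tanh here instead)
    return 1.0 / (1.0 + math.exp(-x))


def dsigmoid(y):
    # derivative written in terms of the output y = sigmoid(x)
    return y * (1.0 - y)


def sgd_step(w, b, x, target, lr=0.5):
    """One gradient-descent update of a single sigmoid unit on one example."""
    y = sigmoid(sum(wi * xi for wi, xi in zip(w, x)) + b)
    delta = (target - y) * dsigmoid(y)                 # output error signal
    w = [wi + lr * delta * xi for wi, xi in zip(w, x)]
    return w, b + lr * delta
```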
- Bp1.py
- Neroun(object)
- __init__(self, weight_len, learn_rate=0.1, delta=random.uniform(1, -1))
- init_weights(self, weight_len, weight_max=0.5, weight_min=-0.5)
- predict(self, inputs)
- simgod(self, value)
- disgod(self, target)
- __len__(self)
- __getitem__(self, index)
- __setitem__(self, index, value)
- update(self, target, predict)
- Layer(object)
- OutPutLayer(Layer)
- HiddenLayer(Layer)
- Bp(object)
- Canopy.py
- Cart.py
- Node(object)
- CartTree(object)
- __init__(self)
- load_model(self, file_path)
- save(self, model_path)
- __train(self, datas, labels, attrs, threshold=0.01)
- split_data_by_attr(self, datas, attrs, attr_name)
- train(self, datas, attrs, labels, threshold=0.01)
- get_split_attr(self, attrs, attr)
- get_split_value(self, datas, split_index)
- calc_gini(self, datas, labels, split_index, split_value)
- get_best_feature(self, datas, labels, attrs)
- _classify(self, data, attrs, node)
- classify(self, data)
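
CartTree.calc_gini(self, datas, labels, split_index, split_value) points at the standard CART split criterion. An illustrative standalone version, with helper names of my own choosing, looks like this:

```python
def gini(labels):
    """Gini impurity of a list of labels."""
    total = float(len(labels))
    counts = {}
    for label in labels:
        counts[label] = counts.get(label, 0) + 1
    return 1.0 - sum((c / total) ** 2 for c in counts.values())


def gini_of_split(datas, labels, split_index, split_value):
    """Weighted Gini impurity of the binary split attr == split_value vs attr != split_value."""
    left = [l for d, l in zip(datas, labels) if d[split_index] == split_value]
    right = [l for d, l in zip(datas, labels) if d[split_index] != split_value]
    n = float(len(labels))
    score = 0.0
    for part in (left, right):
        if part:
            score += len(part) / n * gini(part)
    return score
```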
- DDistance.py
- DataSet.py
- DbScan.py
- DecisionTree.py
- Node(object)
- DecisionTree(object)
- __init__(self)
- train(self, datas, attrs, threshold=0.01, denseData=True, tree=None)
- entropy(probs)
- getSplitAttrs(self, attrs, attr)
- getBestFeature(self, datas, attrs, denseData)
- splitDataByAttr(self, datas, attrs, attr_name, attr_value, denseData=True)
- classify(self, data)
- DecisionTree1.py
- Node(dict)
- DecisionTree(object)
- __init__(self)
- load_model(self, file_path)
- save(self, model_path)
- __train(self, datas, labels, attrs, threshold=0.01, dense_data=True)
- train(self, datas, labels, attrs, threshold=0.01, dense_data=True)
- entropy(probs)
- get_split_attr(self, attrs, attr)
- get_best_feature(self, datas, labels, attrs, dense_data)
- split_data_by_attr(self, datas, labels, attrs, attr_name, attr_value, dense_data=True)
- classify(self, data)
- ID3(DecisionTree)
- C45(ID3)
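
DecisionTree.py and DecisionTree1.py both build on an entropy(probs) helper and a best-feature search, with ID3 and C45 subclasses in the latter. A standalone sketch of the two split criteria involved (ID3's information gain and C4.5's gain ratio) is shown below; apart from entropy, the helper names are hypothetical.

```python
import math


def entropy(probs):
    """Shannon entropy of a probability distribution, in bits."""
    return -sum(p * math.log(p, 2) for p in probs if p > 0)


def label_entropy(labels):
    total = float(len(labels))
    counts = {}
    for label in labels:
        counts[label] = counts.get(label, 0) + 1
    return entropy([c / total for c in counts.values()])


def information_gain(datas, labels, attr_index):
    """Return (ID3 information gain, C4.5 gain ratio) for one discrete attribute."""
    total = float(len(labels))
    gain = label_entropy(labels)
    split_probs = []
    for value in set(d[attr_index] for d in datas):
        subset = [l for d, l in zip(datas, labels) if d[attr_index] == value]
        gain -= len(subset) / total * label_entropy(subset)
        split_probs.append(len(subset) / total)
    split_info = entropy(split_probs)
    return gain, gain / (split_info or 1.0)
```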
- DefaultValue.py
- Dict.py
- Emm.py
- Feature.py
- FeatureExtract.py
- Document(object)
- ITextFeatureScore(object)
- CreateDocument(object)
- TextFeature(object)
- __init__(self, min_word_count=0, filter_rate=0.003)
- extract_feature(self, doc, top_word=0.01)
- text_feature_score(self, doc_word_count, doc_count, word_count, doc_sum)
- filter(self, doc, doc_type, word)
- IM(TextFeature)
- CHI(TextFeature)
- DF(TextFeature)
- WLLR(TextFeature)
- IG(TextFeature)
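
FeatureExtract.py collects several text feature-selection scores as TextFeature subclasses (IM, CHI, DF, WLLR, IG). As one worked example, the chi-square statistic that the CHI class presumably computes can be written from a 2x2 word/class contingency table; the function below is an illustration rather than the class's code.

```python
def chi_square(a, b, c, d):
    """Chi-square score of a word/class pair from a 2x2 contingency table:
    a = docs of the class containing the word, b = other docs containing the word,
    c = docs of the class without the word,    d = other docs without the word."""
    n = float(a + b + c + d)
    denom = (a + c) * (b + d) * (a + b) * (c + d)
    if denom == 0:
        return 0.0
    return n * (a * d - b * c) ** 2 / denom
```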
- GRTree.py
- HCluster.py
- Hmm.py
- Hmm1.py
- Interface.py
- KdTree.py
- Kmeans.py
- KmeansPlusPlus.py
- Knn.py
- LinerModel.py
- Logistic.py
- MiniBatchKMeans.py
- MiniBatchKmeans(Kmeans)
- cluster(self, datas, k, iter_count=10000, diff=0.00001)
- rand_seed(self, datas, k)
- add(self, center, data, eta, data_len)
- DMiniBatchKmeans(MiniBatchKmeans, DefaultDistance)
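
MiniBatchKmeans.add(self, center, data, eta, data_len) matches the per-center learning-rate update from mini-batch k-means (Sculley, 2010). A standalone sketch of one batch update under that reading, not the class's actual code:

```python
def minibatch_kmeans_step(centers, counts, batch):
    """One mini-batch pass: assign each point to its nearest center, then move
    that center toward the point with a per-center learning rate eta = 1/count."""
    for point in batch:
        # nearest center by squared Euclidean distance
        j = min(range(len(centers)),
                key=lambda i: sum((c - x) ** 2 for c, x in zip(centers[i], point)))
        counts[j] += 1
        eta = 1.0 / counts[j]
        centers[j] = [(1.0 - eta) * c + eta * x
                      for c, x in zip(centers[j], point)]
    return centers, counts
```

For example, minibatch_kmeans_step([[0.0, 0.0], [5.0, 5.0]], [1, 1], [[0.2, 0.1], [4.8, 5.2]]) nudges each seed center toward its nearest batch point.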
- Ngram.py
- OneHotCode.py
- PageRank.py
- RandomForst.py
- RegressionTree.py
- TreeNode(object)
- RegressionTree(object)
- __init__(self)
- train(self, datasets, targets)
- loss(self, datasets, labels, attr, split_value)
- get_target_avg(self, datasets, targets, attr, split_value)
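
RegressionTree.loss(self, datasets, labels, attr, split_value) and get_target_avg suggest the usual least-squares split search. Below is an illustrative single-attribute version with hypothetical helper names:

```python
def squared_loss(targets):
    """Sum of squared deviations from the mean target."""
    if not targets:
        return 0.0
    mean = sum(targets) / float(len(targets))
    return sum((t - mean) ** 2 for t in targets)


def split_loss(values, targets, split_value):
    left = [t for v, t in zip(values, targets) if v <= split_value]
    right = [t for v, t in zip(values, targets) if v > split_value]
    return squared_loss(left) + squared_loss(right)


def best_split(values, targets):
    """Try every observed value as a threshold and keep the one with the lowest loss."""
    return min(set(values), key=lambda s: split_loss(values, targets, s))
```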
- __init__.py
- config.py