熱線電話:13121318867

登錄
首頁精彩閱讀機器學習與R之決策樹C50算法
機器學習與R之決策樹C50算法
2018-01-21
收藏

機器學習與R之決策樹C50算法

決策樹
經驗熵是針對所有樣本的分類結果而言
經驗條件熵是指:對某一特征,按該特征每個取值所佔的樣本比例加權,對各取值下樣本分類結果的熵求和
基尼不純度
簡單地說就是從一個數據集中隨機選取子項,度量其被錯誤分類到其他分組里的概率

決策樹算法使用軸平行分割,因此在表現能力上具有一定的局限性
C5.0算法--可以處理數值型特征和缺失值 只使用最重要的特征--使用熵作為度量-可以自動剪枝
劃分數據集
set.seed(123) #設置隨機種子
train_sample <- sample(1000, 900)#從1000里隨機900個數值
credit_train <- credit[train_sample, ]
credit_test  <- credit[-train_sample, ]
library(C50)
credit_model <- C5.0(credit_train[-17], credit_train$default) #特征數據框-標簽
C5.0(train, labels, trials = 1, costs = NULL)
trials控制自動法循環次數多迭代效果更好 costs可選矩陣 與各類型錯誤項對應的成本-代價矩陣
summary(credit_model)#查看模型
credit_pred <- predict(credit_model, credit_test)#預測
predict(model,test,type="class")  type取class分類結果或者prob分類概率
單規則算法(1R算法)--單一規則直觀,但大數據底下,對噪聲預測不準
library(RWeka)
mushroom_1R <- OneR(type ~ ., data = mushrooms)
重復增量修剪算法(RIPPER) 基于1R進一步提取規則
library(RWeka)

mushroom_JRip <- JRip(type ~ ., data = mushrooms)


[plain] view plain copy

    credit <- read.csv("credit.csv")  
    str(credit)  
      
    # look at two characteristics of the applicant  
    table(credit$checking_balance)  
    table(credit$savings_balance)  
      
    # look at two characteristics of the loan  
    summary(credit$months_loan_duration)  
    summary(credit$amount)  
      
    # look at the class variable  
    table(credit$default)  
      
    # create a random sample for training and test data  
    # use set.seed to use the same random number sequence as the tutorial  
    set.seed(123)  
    #從1000里隨機900個數值  
    train_sample <- sample(1000, 900)  
      
    str(train_sample)  
      
    # split the data frames切分數據集  
    credit_train <- credit[train_sample, ]  
    credit_test  <- credit[-train_sample, ]  
      
    # check the proportion of class variable類別的比例  
    prop.table(table(credit_train$default))  
    prop.table(table(credit_test$default))  
      
    ## Step 3: Training a model on the data ----  
    # build the simplest decision tree  
    library(C50)  
    credit_model <- C5.0(credit_train[-17], credit_train$default)  
      
    # display simple facts about the tree  
    credit_model  
      
    # display detailed information about the tree  
    summary(credit_model)  
      
    ## Step 4: Evaluating model performance ----  
    # create a factor vector of predictions on test data  
    credit_pred <- predict(credit_model, credit_test)  
      
    # cross tabulation of predicted versus actual classes  
    library(gmodels)  
    CrossTable(credit_test$default, credit_pred,  
               prop.chisq = FALSE, prop.c = FALSE, prop.r = FALSE,  
               dnn = c('actual default', 'predicted default'))  
      
    ## Step 5: Improving model performance ----  
      
    ## Boosting the accuracy of decision trees  
    # boosted decision tree with 10 trials提高模型性能 利用boosting提升  
    credit_boost10 <- C5.0(credit_train[-17], credit_train$default,  
                           trials = 10)  
    credit_boost10  
    summary(credit_boost10)  
      
    credit_boost_pred10 <- predict(credit_boost10, credit_test)  
    CrossTable(credit_test$default, credit_boost_pred10,  
               prop.chisq = FALSE, prop.c = FALSE, prop.r = FALSE,  
               dnn = c('actual default', 'predicted default'))  
      
    ## Making some mistakes more costly than others  
      
    # create dimensions for a cost matrix  
    matrix_dimensions <- list(c("no", "yes"), c("no", "yes"))  
    names(matrix_dimensions) <- c("predicted", "actual")  
    matrix_dimensions  
      
    # build the matrix設置代價矩陣  
    error_cost <- matrix(c(0, 1, 4, 0), nrow = 2, dimnames = matrix_dimensions)  
    error_cost  
      
    # apply the cost matrix to the tree  
    credit_cost <- C5.0(credit_train[-17], credit_train$default,  
                              costs = error_cost)  
    credit_cost_pred <- predict(credit_cost, credit_test)  
      
    CrossTable(credit_test$default, credit_cost_pred,  
               prop.chisq = FALSE, prop.c = FALSE, prop.r = FALSE,  
               dnn = c('actual default', 'predicted default'))  
      
    #### Part 2: Rule Learners -------------------  
      
    ## Example: Identifying Poisonous Mushrooms ----  
    ## Step 2: Exploring and preparing the data ---- 自動因子轉換--將字符標記為因子減少存儲  
    mushrooms <- read.csv("mushrooms.csv", stringsAsFactors = TRUE)  
      
    # examine the structure of the data frame  
    str(mushrooms)  
      
    # drop the veil_type feature  
    mushrooms$veil_type <- NULL  
      
    # examine the class distribution  
    table(mushrooms$type)  
      
    ## Step 3: Training a model on the data ----  
    library(RWeka)  
      
    # train OneR() on the data  
    mushroom_1R <- OneR(type ~ ., data = mushrooms)  
      
    ## Step 4: Evaluating model performance ----  
    mushroom_1R  
    summary(mushroom_1R)  
      
    ## Step 5: Improving model performance ----  
    mushroom_JRip <- JRip(type ~ ., data = mushrooms)  
    mushroom_JRip  
    summary(mushroom_JRip)  
      
    # Rule Learner Using C5.0 Decision Trees (not in text)  
    library(C50)  
    mushroom_c5rules <- C5.0(type ~ odor + gill_size, data = mushrooms, rules = TRUE)  
    summary(mushroom_c5rules)

數據分析咨詢請掃描二維碼

若不方便掃碼,搜微信號:CDAshujufenxi

數據分析師資訊
更多

OK
客服在線
立即咨詢
日韩人妻系列无码专区视频,先锋高清无码,无码免费视欧非,国精产品一区一区三区无码
客服在線
立即咨詢