Statistics: From Data to Conclusions (5th Edition), by Wu Xizhi and Lü Xiaoling

Code for Chapters 1-8 (Chapter 2 has no code)

Note: the code below follows the order in the book and is grouped by chapter only, not by section

Chapter 1

w=read.table("change.txt",header=T)
B=xtabs(Freq~.,w)
ftable(B,col.vars =3:2 ) 
w=read.table('lowbwt.txt',header=T)
boxplot(LWT~RACE,w,horizontal = T,col=4)
w=read.csv('NYC_RS.csv',stringsAsFactors = T)
library(tidyr) # provides the pipe operator %>% used below
NM=table(w[,1]) %>% sort(decreasing = T) %>% .[1:10]
plot(log(w[w[,1]==names(NM)[1],2:3]),main='NYC rolling sales',
     ylab='LOG SALE PRICE',
     xlim=range(log(w[,2])),ylim=range(log(w[,3])))
for (i in 2:10){
  points(log(w[w[,1]==names(NM)[i],2:3]),col=i,pch=i)
}
legend('bottomright',names(NM),col=1:10,pch=1:10,cex=.6)
import pandas as pd
import numpy as np  # needed for np.log below
import matplotlib.pyplot as plt
v=pd.read_csv("NYC_RS.csv")
plt.figure(figsize=(8,2.5))
plt.scatter(np.log(v['GROSS.SQUARE.FEET']),np.log(v['SALE.PRICE']))
plt.title('NYC rolling sales')
plt.xlabel('LOG GROSS SQUARE FEET')
plt.ylabel('LOG SALE PRICE')
w=pd.read_csv("change.txt",sep=" ")
pd.pivot_table(w,index=["Age"],values=["Freq"],
               columns=["Change","Edu"],aggfunc=[np.sum])
pd.pivot_table(w,index=["Edu"],values=["Freq"],
               columns=["Change"],aggfunc=[np.sum])
import seaborn as sns  # needed for sns.boxplot below
u=pd.read_csv('lowbwt.txt',sep="\s+")
plt.figure(figsize=(15,5))
sns.boxplot(y="RACE", x="LWT", data=u, orient='h')
v=pd.read_csv("NYC_RS.csv")
from collections import Counter
d=Counter(v['NEIGHBORHOOD'])
Top=[]
for k,w in enumerate(sorted(d, key=d.get, reverse=True)):
    if k<10:
        Top.append(w)
v1=v[v['NEIGHBORHOOD'].isin(Top)]
sns.relplot(x=np.log(v1['GROSS.SQUARE.FEET']), 
            y=np.log(v1["SALE.PRICE"]), 
            hue=v1["NEIGHBORHOOD"],height=4,aspect=3)
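
Note (not from the book): the Counter loop above can be replaced by a single pandas call; a minimal sketch assuming the same NYC_RS.csv file:
import pandas as pd
v=pd.read_csv("NYC_RS.csv")
# value_counts sorts descending, so nlargest(10).index matches Top above
Top=v['NEIGHBORHOOD'].value_counts().nlargest(10).index.tolist()
print(Top)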

Chapter 3

v=read.table("Billianaires.txt",sep=",",header=T,na.strings="-")
par(mfrow=c(1,2))  # prepare two side-by-side plots; c(1,2) means one row, two columns
hist(v$Age,main="",xlab="Age")
rug(v$Age) # mark each point's position below the histogram
hist(v$Net.Worth,main="",xlab="Net Worth");rug(v$Net.Worth)
w=v[v[,6]=="United States"|v[,6]=="China"|v[,6]=="Japan",]
w[,6]=as.character(w[,6])
boxplot(Age~Country.of.Citizenship,w,horizontal = TRUE)
stem(v[v[,6]=="China",4])
v=read.table("g100.txt",sep=",",header=T)
plot(v$Assets, v$Sales, pch=1, col=1, xlab="Assets(Billion $)",
     ylab="Sales(Billion $)", ylim=c(0,600), xlim=c(-350,3000),
     cex=log(v$Profits), main = 
 "Global 100 Companys' Assets, Sales and log Profits (size of points)")
identify(v$Assets, v$Sales,labels=v$Company)
w=read.table("global2000.txt",sep=",",header=T)
ws=sort(table(w$Country),de=T)
pie(ws[1:10])
title("Number of Companies Among top 10")
barplot(ws[1:10],cex.names =.8,horiz = T, col=4,las=1,
        main="Number of Companies Among top 10")
v=read.table("g100.txt",sep=",",header=T)
library(TeachingDemos);
q=v[1:10,4:7];row.names(q)=v[1:10,2]
faces(q,nrow=2,ncol=5)
stars(q,nrow=2,ncol=5,lwd = 2)
v=read.table("Billianaires.txt",sep=",",header=T,na.strings="-")
library(ineq); plot(Lc(v[,3]),col='red');Gini(v[,3])
w=read.table("grade.txt",header=T)
par(mfrow=c(1,2))
boxplot(grade~class, w,main="Original Grades")
boxplot(standardized~class, w,main="Standardized Grades")
# import packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
os.chdir("E:/") # set the working directory
v=pd.read_csv("Billianaires.txt") # read in the data
a=v["Age"].values
a=filter(lambda x: x != "-", a) # drop missing values
a=np.array(list(map(int,a)))
b=v["Net Worth"].values
b=np.array(list(map(int,b)))

plt.figure(figsize = (12,3)) # set the figure size
plt.subplot(1, 2, 1) # first of two side-by-side plots
plt.xlabel("Age")
plt.ylabel("Frequency")
sns.distplot(a,bins=10,kde=False,rug=True,
    hist_kws={"color":"white","edgecolor":"black","alpha":1},
    rug_kws={"color":"black","height":0.02}) # mark each point below the histogram

plt.subplot(1, 2, 2) # second of two side-by-side plots
plt.xlabel("Net Worth")
plt.ylabel("Frequency")
sns.distplot(b,bins=10,kde=False,rug=True,
    hist_kws={"color":"white","edgecolor":"black","alpha":1},
    rug_kws={"color":"black","height":0.02})
plt.show()
# prepare the data
w=v[v["Country of Citizenship"].isin(["United States","China","Japan"])]
w=w[~(w.Age=="-")]
w["Age"]=list(map(int,w["Age"]))
plt.figure(figsize=(20,6))
p=sns.boxplot(y="Country of Citizenship",x="Age", data=w,color="white",
              linewidth=2)
plt.xlabel("Age",size=15)
plt.ylabel("Country of Citizenship",size=15)
plt.show()
# prepare the data
w=v[v["Country of Citizenship"].isin(["China"])]
w=w[~(w.Age=="-")]
a=list(map(int,w["Age"]))

from itertools import groupby
# stems are in units of 10 years, leaves in units of 1 year
for k,g in groupby(sorted(a), key=lambda x: int(x) // 10):
    lst=map(str,[int(y) % 10 for y in list(g)])
    print (k,'|',' '.join(lst))
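
Note (not from the book): the same groupby stem-and-leaf trick on a small fixed list, to show the output format:
from itertools import groupby
a=[23,25,31,34,34,47]
for k,g in groupby(sorted(a), key=lambda x: x // 10):
    print(k,'|',' '.join(str(y % 10) for y in g))
# prints: 2 | 3 5   then   3 | 1 4 4   then   4 | 7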
v=pd.read_csv("g100.txt") # read in the data
import math
plt.figure(figsize=(20,6))
plt.scatter(v["Assets"],v["Sales"],marker='o',
            s=20*np.array(list(map(math.log,v["Profits"]))),
            edgecolors="black",color="white")
plt.xlabel("Assets (Billion $)")
plt.ylabel("Sales (Billion $)")
plt.title("Global 100 Companys' Assets, Sales and log Profits \
(size of points)")
plt.show()
from collections import Counter
A=sorted(Counter(v["Country"]).items(), key=lambda x:x[1], 
         reverse= True)[:10]
A=pd.DataFrame(A)

import matplotlib.pyplot as plt
plt.figure(figsize=(20,6))
plt.pie(A.iloc[:,1],labels=A.iloc[:,0],labeldistance=1.1,colors=
        ["white","gray","darkgrey","darkgray","silver","lightgray",
         "lightgrey","gainsboro","whitesmoke","grey"],
        wedgeprops={"edgecolor":"black",'linewidth':0.5}) # set the wedge colors
plt.title("Number of Companies Among top 10")
plt.show()
plt.figure(figsize = (20,6)) # set the figure size
x=np.arange(len(A)) # integer positions for the bars
plt.yticks(x, A.iloc[:,0]) # pair the positions with the labels
plt.barh(x, A.iloc[:,1],color="navy",edgecolor="black",alpha=0.5)
plt.title("Number of Companies Among top 10")
plt.show()
v=pd.read_csv("grade.txt",sep="\t") # read in the data
v["class"]=list(map(int,v["class"])) 
plt.figure(figsize = (20,6)) # set the figure size
plt.subplot(1, 2, 1) # first of two side-by-side plots
sns.boxplot(x="class",y="grade",data=v,color="white",linewidth=2) 
plt.title("Original Grades")
plt.subplot(1, 2, 2) 
sns.boxplot(x="class",y="standardized",data=v,color="white",linewidth=2) 
plt.title("Standardized Grades")
plt.show()

Chapter 4

par(mfrow=c(3,3))
for(p in seq(0.1,0.9,.1)) 
  barplot(dbinom(0:5,5,p),names.arg = as.character(0:5),
          main=paste('p=',p),col=4)
dbinom(0:5,5,.5);dbinom(0:5,5,.7)
P=outer(0:20,c(3,6,10),dpois)
matplot(P,lty=1:3,pch=15:17,type='b',xlab='k',ylab='p(k)')
legend(19,0.23, expression(lambda==3,lambda==6,lambda==10),
  lty=1:3,col=1:3,pch=15:17)
x=rnorm(50000)
par(mfrow=c(2,2))
for (i in c(15,50,100)) hist(x,i,axes=F,xlab="",ylab="")
curve(dnorm(x),from=-4,to=4,axes=F,main="density");abline(h=0)
x=seq(-5,5,.01)
plot(x,dnorm(x,-2,0.5),xlim=c(-5,5),lty=3,type="l")
lines(x,dnorm(x,0,1),lty=1)
legend(3.6,.8,c('N(-2,0.5)','N(0,1)'),lty = c(3,1))
library(tidyverse)
x=seq(-4,4,.01);y=dnorm(x)
data <- tibble(x = x, y = y) %>% 
  mutate(variable = case_when(
    (x >.51 & x <= 1.57) ~ "On",
    TRUE ~ NA_character_))
g1=ggplot(data, aes(x, y)) + geom_line() +
  geom_area(data = filter(data, variable == 'On'), fill = 'red') +
  labs(y=expression(Phi(x)))
data <- tibble(x = x, y = y) %>% 
  mutate(variable = case_when(
    (x >qnorm(.95)) ~ "On",
    TRUE ~ NA_character_))
g2=ggplot(data, aes(x, y)) + geom_line() +
  geom_area(data = filter(data, variable == 'On'), fill = 'red') +
  annotate(geom="text",x=3,y=.035,label=expression(P(Z>z[0.05])==0.05))+
  annotate(geom="text",x=1.645,y=.012,label=expression(z[0.05]==1.645))+
  annotate(geom="text", x=0, y=.2,label=expression(P(Z<z[0.05])==0.95))+
  labs(x="z", y="Density of N(0,1)",title="Tail probability of N(0,1)")
library(patchwork)
g1+g2
ggplot(data = data.frame(x= c(-0.5,1.5)),aes(x=x)) +
  stat_function(fun = dunif, args=list(0,1),colour = "red",linetype=1) +
  labs(x="",y="")
data(tips, package="reshape2")
hist(tips$total_bill)
rug(tips$total_bill)
plot(ecdf(tips$total_bill), verticals = TRUE, do.points = FALSE)
plot(ecdf(tips$size), verticals = TRUE)
x=sample(tips$total_bill,80000,replace = TRUE)
hist(x,18)
rug(x)
library(reshape2)
data(tips)
B=9999; s=NULL; m=NULL
for (i in 1:B){ 
  s=c(s,median(sample(tips$total_bill,replace = TRUE)))
  m=c(m,mean(sample(tips$total_bill,replace = TRUE)))
}   
layout(t(1:2))
hist(s,30,col=2);rug(s)
hist(m,30,col=4);rug(m)
# import packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
import os
os.chdir("E:/")
fig=plt.figure(figsize=(20,6)) # set the figure size
fig.subplots_adjust(hspace=0.6,wspace=0.3) # set the spacing between subplots

for p in range(1,10):
    mybar=fig.add_subplot(3,3,p)
    mybar.bar(range(6),stats.binom.pmf(range(6),5,(p+0.0)/10),
              color="blue",edgecolor="black",alpha=0.5)
    mybar.set_xticks(range(6)) # set the x-axis ticks
    mybar.set_xticklabels(range(6))
    plt.title("p="+str((p+0.0)/10))
plt.show()
print(stats.binom.pmf(range(6),5,0.5))
print(stats.binom.pmf(range(6),5,0.7))
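
Note (not from the book): a quick sanity check of the pmf values above; the probabilities sum to 1 and the distribution mean equals n*p:
import numpy as np
import scipy.stats as stats
p=stats.binom.pmf(range(6),5,0.5)
print(p.sum())                 # 1.0: probabilities sum to one
print((np.arange(6)*p).sum())  # 2.5 = n*p for n=5, p=0.5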
plt.figure(figsize=(10,4)) # set the figure size
plt.plot(range(21),stats.poisson.pmf(range(21),3),label=r'$\lambda=3$',
         linewidth=1,color='black',marker='o', 
         markerfacecolor='black',markersize=6) # line 1
plt.plot(range(21),stats.poisson.pmf(range(21),6),label=r'$\lambda=6$',
         linewidth=1,color='gray',marker='+', 
         markerfacecolor='black',markersize=8,linestyle='--') # line 2
plt.plot(range(21),stats.poisson.pmf(range(21),10),
         label=r'$\lambda=10$',  linewidth=2,color='grey',marker='^', 
         markerfacecolor='black',markersize=6,linestyle=':') # line 3
plt.xlabel('k')
plt.ylabel('p(k)')
plt.legend() # legend
plt.show()
x=stats.norm.rvs(size=50000) # generate random numbers
x.sort()

plt.figure(figsize=(20,6)) # set the figure size
plt.subplots_adjust(hspace=0.6,wspace=0.3) # set the spacing between subplots
mybins=[15,50,100]

# draw the histograms
for i in range(3):
    plt.subplot(2,2,(i+1))
    sns.distplot(x,bins=mybins[i],kde=False,
              hist_kws={"color":"white","edgecolor":"black","alpha":1})
    plt.axhline(y=0,color="black")
    plt.axis('off')
    plt.title("Histogram of x")
    
# draw the density curve
plt.subplot(2,2,4)
plt.plot(x,stats.norm.pdf(x),color="black")
plt.axis('off')
plt.axhline(y=0,color="black")
plt.title("density")
plt.show()
x=np.arange(-5,5,0.1) # generate a sequence

plt.figure(figsize=(20,6)) # set the figure size
plt.plot(x,stats.norm.pdf(x,-2,0.5),linewidth=1,
         color='black',label="N(-2,0.5)")
plt.plot(x,stats.norm.pdf(x,0,1),linewidth=1,
         color='black',linestyle="--",
         label="N(0,1)")
plt.legend() # draw the legend
plt.show()
import scipy.stats as stats
x=np.arange(-4,4,0.1) 
y=stats.norm.pdf(x) # generate a sequence and its density values
plt.figure(figsize=(21,6)) # set the figure size
plt.subplot(121)
plt.plot(x,y,linewidth=1,color='black') # draw the density curve
plt.fill_between(np.arange(0.51,1.57,0.1),
                 stats.norm.pdf(np.arange(0.51,1.57,0.1)),
                 color="blue",alpha=0.5) # shade the area under the curve
plt.xlabel("x")
plt.ylabel(r"$\Phi(x)$") # y-axis label

plt.subplot(122)
plt.plot(x,y,linewidth=1,color='black') # draw the density curve
plt.fill_between(np.arange(stats.norm.ppf(0.95),4,0.1),
                 stats.norm.pdf(np.arange(stats.norm.ppf(0.95),4,0.1)),
                 color="red",alpha=0.5) # shade the area under the curve
plt.xlabel("z")
plt.ylabel("Density of N(0,1)") # y-axis label
plt.title("Tail probability of N(0,1)")
plt.text(2.5,0.035,r"$P(Z>z_{0.05})=0.05$")
plt.text(1.645,-0.005,r"$z_{0.05}=1.645$")
plt.text(-1,0.2,r"$P(Z<z_{0.05})=0.95$")
x=np.arange(-0.5,1.5,0.01) # generate a sequence
plt.figure(figsize=(20,6)) # set the figure size
plt.plot(x,stats.uniform.pdf(x,0,1),linewidth=1,
         color='black',label=r"$U(0,1)$")
plt.show()
w=pd.read_csv("tips.csv")
plt.figure(figsize=(21,7))
plt.xlabel("total_bill")
plt.ylabel("Frequency")
sns.distplot(w["total_bill"],bins=10,kde=False,rug=True,
    hist_kws={"color":"yellow","edgecolor":"black","alpha":1},
    rug_kws={"color":"black","height":0.02}) # mark each point below the histogram
from statsmodels.distributions.empirical_distribution import ECDF
ecdf=ECDF(w["total_bill"])
plt.figure(figsize=(21,7))
plt.title("ECDF of total_bill")
plt.xlabel("x")
plt.ylabel(r"$F_n(x)$")
plt.plot(ecdf.x,ecdf.y)
size=ECDF(w["size"])
plt.figure(figsize=(21,7))
plt.title("ECDF of size")
plt.xlabel("x")
plt.ylabel(r"$F_n(x)$")
plt.plot(size.x,size.y)
x=np.random.choice(w["total_bill"],80000)
plt.figure(figsize=(21,7))
plt.xlabel("total_bill bootstrap sample")
plt.ylabel("Frequency")
sns.distplot(x,bins=18,kde=False,rug=True,
    hist_kws={"color":"yellow","edgecolor":"black","alpha":1},
    rug_kws={"color":"black","height":0.02})
B=9999;s=[];m=[]
for i in range(B):
    x=np.random.choice(w["total_bill"],len(w["total_bill"]))
    s.append(np.median(x))
    m.append(np.mean(x))
plt.figure(figsize=(21,7))
plt.subplot(121)
plt.hist(x=s, bins='auto', color='#0504aa',alpha=0.7, rwidth=0.85)
plt.title('Sampling distribution of median')
plt.subplot(122)
plt.hist(x=m, bins='auto', color='#0504ff',alpha=0.7, rwidth=0.85)
plt.title('Sampling distribution of mean')
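
Note (not from the book): the bootstrap draws s and m computed above also give interval estimates; a minimal sketch of 95% percentile intervals:
import numpy as np
print(np.percentile(s, [2.5, 97.5]))  # 95% interval for the median
print(np.percentile(m, [2.5, 97.5]))  # 95% interval for the mean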

Chapter 5

w=read.csv("Bidding.csv")[,-(1:3)];w$Class=factor(w$Class)
par(mfrow=c(3,3))
for (i in 1:9) {
  t=paste(names(w)[i],'~',names(w)[10])
  f=formula(t) 
  boxplot(f,horizontal = TRUE,w,ylab = "Class", xaxt='n', col=4)
  title(t)
}
boxplot(Auction_Duration~Class,w[w[,3]>0.75,],horizontal = TRUE, col=4)
title('Auction_Duration~Class when Successive_Outbidding>0.75')
w=read.csv("Bidding.csv")[,-(1:3)];w$Class=factor(w$Class)
library(rpart.plot)
b=rpart(Class~.,w);rpart.plot(b,extra=1,digit=4)
sb=CVC0(w,D=10,Z=10,fun=rpart) 
# In CVC0: w is the data, D the column index of the response, Z the number of folds, fun the model
# CVC0 returns the predictions $pred, the confusion matrix $cfm, and the misclassification rate $error
barplot(b$variable.importance,horiz = TRUE,las=2, 
        main="Variable importance",col=4,cex.axis = .5)
u=read.csv("Concrete.csv")
require(ggplot2)
require(GGally)
ggplot2::theme_set(ggplot2::theme_bw()) 
ggpairs(u)
u=read.csv("Concrete.csv")
library(rpart.plot)
a=rpart(Compressive.strength~.,u) 
rpart.plot(a,extra=1,digit=4)
nmse=CVR(u,D=9,Z=10,seed = 8888)$nmse # u is the data, D the response position, Z the number of folds
a=rpart(Compressive.strength~.,u);D=9
nmsr=sum((u[,9]-predict(a,u))^2)/sum((u[,D]-mean(u[,D]))^2)
cat('10 fold cross validation NMSE = ', nmse,
'\nNMSE for training set = ', nmsr)
barplot(a$variable.importance,horiz = TRUE,las=2, 
        main="Variable importance",col=4,cex.axis = .5)
library(ipred)
w=read.csv("pendigits.csv");w[,17]=factor(w[,17])
pen_bagging=ipred::bagging(V17~.,w,coob=TRUE)
w=read.csv("boston.csv"); w$CHAS=factor(w$CHAS)
library(ipred)
bos_bagging=ipred::bagging(MEDV~.,w,coob=TRUE)
v=read.csv('Bidding.csv')[,-c(1:3)]
v$Class=factor(v$Class)
library(adabag)
sb_ada=boosting(Class~.,v)
barplot(sb_ada$importance,horiz = TRUE,las=2,col=4,cex.axis = .7)
v=read.csv("derm.csv")
for (i in (1:ncol(v))[-34]) v[,i]=factor(v[,i])
library(randomForest)
derm_rf=randomForest(V35~.,v,proximity=TRUE,localImp=TRUE)
par(mfrow=c(2,2))
for(i in 1:ncol(derm_rf$importance))
  barplot(derm_rf$importance[,i], horiz = TRUE,las=2,
          cex.axis = .5, main=colnames(derm_rf$importance)[i],
          cex.names=0.4,col=4)
matplot(1:34, derm_rf$local[,1:100], type = "l", xaxt="n",
        xlab="Variable", ylab = "Local importance",
        main="Local importance")
  axis(side = 1,at = 1:34,labels = rownames(derm_rf$local), 
       las=3, cex.axis=.8)
NM=names(v)[1:8]
par(mfrow=c(2,4))
for(i in 1:8)
  partialPlot(derm_rf,pred.data=v,NM[i],xlab=NM[i],
              main=paste("Partial Dependence on",NM[i]))
plot(outlier(derm_rf$proximity), type="h",ylab="Outlying measure")
u=read.csv("Concrete.csv")
library(randomForest)
con_rf=randomForest(Compressive.strength~., u, proximity=TRUE,
                    localImp=TRUE)
SSE=sum((u$Compressive.strength-mean(u$Compressive.strength))^2)
(OOB_NMSE=sum((u$Compressive.strength-con_rf$predicted)^2)/SSE)
# or equivalently 25.80956/SSE*nrow(u)
par(mfrow=c(1,2))
for(i in 1:ncol(con_rf$importance))
  barplot(con_rf$importance[,i], horiz = TRUE, las = 2,
          cex.axis = .5, main = colnames(con_rf$importance)[i],
          cex.names = 1,col = 4)
matplot(1:8,con_rf$local[,1:100], type = "l", xaxt="n",
        xlab="Variable", ylab = "Local importance", 
        main="Local importance")
  axis(side = 1, at = 1:8, labels = rownames(con_rf$local),
       las=0, cex.axis=.8)
NM=names(u)[1:8]
par(mfrow=c(2,4))
for(i in 1:8)
partialPlot(con_rf,pred.data=u,NM[i],xlab=NM[i],
            main=paste("Partial Dependence on",NM[i]))
plot(outlier(con_rf$proximity), type="h",ylab="Outlying measure")
w=read.csv("pendigits.csv");w[,17]=factor(w[,17])
library(e1071)
pen_svm=svm(V17 ~ ., data = w)
library(kernlab)
w=read.csv("boston.csv"); w$CHAS=factor(w$CHAS)
bos_svm=ksvm(MEDV~.,w)
CVR(w,D=14,Z=10,fun=ksvm,seed=1010)$nmse 
w=read.csv("Bidding.csv")[,-(1:3)];w$Class=factor(w$Class)
bid_binom=glm(Class~.,w,family = binomial) 
pred=rep(levels(w$Class)[1],nrow(w))
pr=predict(bid_binom,w,type="response")
pred[pr>0.5]=levels(w$Class)[2]
pred[pr<=0.5]=levels(w$Class)[1]
w=read.csv("pendigits.csv");w[,17]=factor(w[,17])
library(MASS)
pen_lda=lda(V17~.,w)
Fold=function(Z=5,w,D,seed=7777){ 
  n=nrow(w);d=1:n;dd=list() 
  w[,D]=factor(w[,D])
  e=levels(w[,D]);T=length(e) # the response has T classes
  set.seed(seed)
  for(i in 1:T){
    d0=d[w[,D]==e[i]];j=length(d0) 
    ZT=rep(1:Z,ceiling(j/Z))[1:j] 
    id=cbind(sample(ZT,length(ZT)),d0);dd[[i]]=id
  } # each dd[[i]] pairs random fold labels 1:Z with the row indices of class i
  mm=list()
  for(i in 1:Z){
    u=NULL;
    for(j in 1:T) u=c(u,dd[[j]][dd[[j]][,1]==i,2]) 
    mm[[i]]=u
  } # mm[[i]] is the i-th index set, i=1,...,Z 
  return(mm)
} # returns a list of Z index sets
CVC0=function(w,D=5,Z=10,fun=rpart::rpart,seed=NULL){
  library(tidyr)
  if (is.numeric(seed)) set.seed(seed) else seed=8888
  n=nrow(w)
  mm=Fold(Z=Z,w=w,D=D,seed=seed)
  pred=sample(w[,D])
  f=formula(paste(names(w)[D],"~."))
  for (i in 1:Z) {
    m=mm[[i]]
    if (identical(fun,glm)){
      pr=fun(f,w[-m,],family=binomial) %>% predict(w[m,],type="response")
      pred[m][pr>0.5]=levels(w[,D])[2]
      pred[m][pr<=0.5]=levels(w[,D])[1]
    } else if (identical(fun,adabag::boosting)|identical(fun,MASS::lda)){
       pred[m]=fun(f,w[-m,])%>%predict(.,w[m,]) %>% .$class
    } else if (identical(fun,rpart::rpart)) {
       pred[m]=fun(f,w[-m,]) %>% predict(.,w[m,],type="class") 
    } else
      pred[m]=fun(f,w[-m,]) %>% predict(w[m,])
  }
  return(list(pred=pred,error=mean(w[,D]!=pred),cfm=table(w[,D],pred)))
}
CVR=function(w,D=1,Z=10,fun=rpart,seed=NULL){
  if (is.numeric(seed)) set.seed(seed)
  n=nrow(w)
  I=sample(rep(1:Z,ceiling(n/Z)))[1:n]
  pred=rep(999,n)
  f=formula(paste(names(w)[D],"~."))
  for (i in 1:Z) {
    m=(I==i)
    pred[m]=fun(f,w[!m,]) %>% predict(w[m,])
  }
  M=sum((w[,D]-mean(w[,D]))^2)
  nmse=sum((w[,D]-pred)^2)/M
  return(list(pred=pred,nmse=nmse))
}
library(ipred);library(randomForest);library(adabag);library(e1071)
library(tidyverse)
w=read.csv("Bidding.csv")[,-(1:3)];w$Class=factor(w$Class)  
D=10;Z=10;n=nrow(w)
method=c('bagging','adaboost','randomForest','svm','logistic')
PRED = matrix(0,nrow(w),length(method),dimnames=list(NULL,method)) 
PRED = data.frame(PRED)
for(i in 1:ncol(PRED)) PRED[,i]=factor(PRED[,i],levels=c("0","1"))

mm=Fold(Z=Z,w=w,D=D,1010)
f=formula(paste(names(w)[D],"~."))
for(i in 1:Z){
  m=mm[[i]]
  PRED[m,1]=ipred::bagging(f,w[-m,]) %>% predict(w[m,])
  PRED[m,2]=boosting(f,w[-m,])%>%predict(.,w[m,]) %>% .$class
  PRED[m,3]=randomForest(f,w[-m,])%>%predict(.,w[m,])
  PRED[m,4]=svm(f, w[-m,],kernel="sigmoid") %>% predict(.,w[m,])
  pr=glm(f,w[-m,],family = binomial) %>% predict(w[m,],type="response")
  PRED[m,5][pr>0.5]="1"
  PRED[m,5][pr<=0.5]="0"
}
error=sapply(PRED,function(x)mean(x!=w$Class))
(EE=data.frame(Model=names(error),Error=error))

ggplot(EE, aes(x=Model, y=Error)) +
  geom_bar(stat="identity", width=.5, fill="navyblue") +
  labs(title=
  "Cross Validation Error Rates for 5 Methods of Classification",
    x='Model') +
  geom_text(aes(label=round(Error,4)), hjust=c(rep(-.1,2),rep(1,3)), 
              color=c(rep("black",2),rep("white",3)), size=5)+
  theme(axis.text.x = element_text(angle=65, vjust=0.6))+
  coord_flip()
w=read.csv("pendigits.csv");w[,17]=factor(w[,17])

library(ipred);library(randomForest);library(adabag);library(e1071)
library(MASS);library(tidyverse)

D=17;Z=10;n=nrow(w)
method=c('bagging','adaboost','randomForest','svm','lda')
PRED = matrix(1,nrow(w),length(method),dimnames=list(NULL,method)) 
PRED = data.frame(PRED)
for(i in 1:ncol(PRED)) PRED[,i]=factor(PRED[,i],levels=levels(w[,D]))

mm=Fold(Z=Z,w=w,D=D,1010)
f=formula(paste(names(w)[D],"~."))
for(i in 1:Z){
  m=mm[[i]]
  PRED[m,1]=ipred::bagging(f,w[-m,]) %>% predict(w[m,])
  PRED[m,2]=boosting(f,w[-m,])%>%predict(.,w[m,]) %>% .$class
  PRED[m,3]=randomForest(f,w[-m,])%>%predict(.,w[m,])
  PRED[m,4]=svm(f, w[-m,],kernel="sigmoid") %>% predict(.,w[m,])
  PRED[m,5]=lda(V17~.,w[-m,])%>%predict(.,w[m,])%>%.$class
}

error=sapply(PRED,function(x)mean(x!=w$V17))
(EE=data.frame(Model=names(error),Error=error))

ggplot(EE, aes(x=Model, y=Error)) +
  geom_bar(stat="identity", width=.5, fill="navyblue") +
  labs(title=
  "Cross Validation Error Rates for 5 Methods of Classification",
    x='Model') +
  geom_text(aes(label=round(Error,4)), hjust=c(rep(1,2),rep(-.1,2),1), 
              color=c(rep("white",2),rep("black",2),"white"), size=5)+
  theme(axis.text.x = element_text(angle=65, vjust=0.6))+
  coord_flip()
w=read.csv("Concrete.csv")
library(tidyverse)
library(ipred)
library(randomForest);library(kernlab)
D=9;Z=10
gg=paste(names(w)[D],"~",".",sep="")
(gg=as.formula(gg))

n=nrow(w)
pred=matrix(999,n,4)
M=sum((w[,D]-mean(w[,D]))^2)
set.seed(1010)
I=sample(rep(1:Z,ceiling(n/Z)))[1:n]

for(i in 1:Z){
  m=(I==i)
  pred[m,1]=ipred::bagging(gg,data =w[!m,])%>%predict(w[m,])
  pred[m,2]=randomForest(gg,data =w[!m,])%>%predict(w[m,])
  pred[m,3]=ksvm(gg,data =w[!m,])%>%predict(w[m,])
  pred[m,4]=lm(gg,data =w[!m,])%>%predict(w[m,])
}
nmse=apply((sweep(pred,1,w[,D],'-'))^2,2,sum)/M
NMSE=data.frame(Method=c('bagging','randomForest','SVM','LM'),
                NMSE=nmse)
ggplot(NMSE, aes(x=Method, y=NMSE)) +
  geom_bar(stat="identity", width=.5, fill="navyblue") +
  labs(title="Normalized MSE for 4 Methods") +
  geom_text(aes(label=round(NMSE,4)), hjust=c(rep(1,4)), 
            color=c(rep("white",4)), size=3.5)+
  theme(axis.text.x = element_text(angle=65, vjust=0.6))+
  coord_flip()
w=read.csv("boston.csv")
a=lm(MEDV~.-1,w)$coef
b=NULL
for(i in c(1:13)) {
  f=formula(paste('MEDV~',names(w)[i],'-1'))
  b=c(b,lm(f,w)$coef)
}
coef=data.frame(rbind(a,b))
coef$Model=c('Multi','Single')
library(reshape2)
coef.long<-melt(coef,id.vars="Model")

library(ggplot2)
ggplot(coef.long,aes(x=variable,y=value,fill=Model))+
  geom_bar(stat="identity",position="dodge")+ 
  scale_fill_discrete(name="Model",
    labels=c("Multiple", "Simple"))+
  labs(title =paste('The differences of coefficients', 
               'between multiple and simple regression'),
    x="Models",y="Coefficient")+
  coord_flip()
# import packages
import math
import random
import graphviz
import pandas as pd
import numpy as np
import seaborn as sns
import statsmodels.api as sm
import matplotlib.pyplot as plt

from collections import defaultdict
from IPython.display import SVG

from sklearn import tree
from sklearn.model_selection import cross_validate
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier,\
     DecisionTreeRegressor, export_graphviz
from sklearn.ensemble import AdaBoostClassifier,\
     AdaBoostRegressor, RandomForestClassifier,\
     RandomForestRegressor, BaggingClassifier, BaggingRegressor
from sklearn.svm import SVC, SVR
from sklearn.preprocessing import StandardScaler

import statsmodels as tsm
import matplotlib as tplt
import sklearn as tsk

import os
os.chdir("E:/")
w=pd.read_csv("Bidding.csv").iloc[:,3:]
plt.figure(figsize=(20,6))
for i,j in enumerate(w.columns[:-1]):
    plt.subplot(3,3,i+1)
    sns.boxplot(y="Class", x=j, data=w, dodge=True,orient='h')
plt.show()
plt.figure(figsize=(20,6))
w1=w[w['Successive_Outbidding']>0.75]
sns.boxplot(y="Class", x=j, data=w1, dodge=True,orient='h')
plt.show()
X = w.iloc[:, : -1]
y = w.iloc[:, -1].astype('category')
clf = DecisionTreeClassifier(max_depth=7)
clf=clf.fit(X, y)

dot_data=tree.export_graphviz(clf,out_file=None, feature_names=X.columns,
                              rounded=True, filled=True) 
graph = graphviz.Source(dot_data) 
graph # display the graph
clf = DecisionTreeClassifier(max_depth=7)
pred,cfm,error=SCCV(X,y,clf,Z=10,seed=1010)
print("confusion matrix\n",cfm,"\nerror =", error)
u=pd.read_csv("Concrete.csv")
sns.set(font_scale=1.5);sns.pairplot(u,aspect=3)
X = u.iloc[:, : -1]
y = u.iloc[:, -1]
reg = DecisionTreeRegressor(max_depth=6)
reg.fit(X, y)
dot_data=tree.export_graphviz(reg,out_file=None, 
                              feature_names=X.columns,
                              rounded=True, filled=True) 
graph = graphviz.Source(dot_data) 
graph # display the graph
reg = DecisionTreeRegressor(max_depth=6)
NMSE, pred=SRCV(X,y,reg)
NMSE
w=pd.read_csv("Bidding.csv").iloc[:,3:]
X = w.iloc[:, : -1]
y = w.iloc[:, -1].astype('category')
names = ["Bagging", "Random Forest", "AdaBoost", 'Logit']
classifiers = [
    BaggingClassifier(n_estimators=100,random_state=1010),
    RandomForestClassifier(n_estimators=500,random_state=0),
    AdaBoostClassifier(n_estimators=100,random_state=0),
    LogisticRegression(solver="liblinear")]
CLS=dict(zip(names,classifiers))
R,A=ClaCV(X,y,CLS)
BarPlot(A,'Error Rate','Model','Error Rate for Classification')

print(A)
w=pd.read_csv("pendigits.csv")
X = w.iloc[:, : -1]
y = w.iloc[:, -1].astype('category')

names = ["Bagging", "Linear SVM", "Random Forest", 'Lda']
classifiers = [
    BaggingClassifier(n_estimators=100,random_state=1010),
    SVC(kernel="linear", C=0.025,random_state=0),
    RandomForestClassifier(n_estimators=500,random_state=0),
    LinearDiscriminantAnalysis()]
CLS=dict(zip(names,classifiers))
R,A=ClaCV(X,y,CLS)
BarPlot(A,'Error Rate','Model','Error Rate for Classification')

print(A)
w=pd.read_csv("Concrete.csv")
y=w['Compressive.strength']
X=w.iloc[:,:-1]
names = ["Bagging","Random Forest","LM"]
regressors = [
    BaggingRegressor(n_estimators=100),
    RandomForestRegressor(n_estimators=500,random_state=1010),
    LinearRegression()]    
REG=dict(zip(names,regressors))

R,A=RegCV(X,y,REG,Z=10,seed=1010)
# cross-validate SVM separately below (its inputs must be standardized first)
NMSE, pred=SRCVSTD(X,y,SVR(gamma='auto'),seed=1010)
A['SVM']=NMSE

BarPlot(A,'NMSE','Model','Normalized MSE for 4 Models')
print(A)
w=pd.read_csv("pendigits.csv")
X = w.iloc[:, : -1]
y = w.iloc[:, -1].astype('category')
clf = BaggingClassifier(n_estimators=100,random_state=1010)
pred,cfm,error=SCCV(X,y,clf,Z=10,seed=1010)

print("confusion matrix\n",cfm,"\nerror =", error)
w=pd.read_csv("boston.csv")
y=w['MEDV']
X=w.iloc[:,:-1]

reg = BaggingRegressor(n_estimators=100)
NMSE, pred=SRCV(X,y,reg)
NMSE
w=pd.read_csv("Bidding.csv").iloc[:,3:]
X = w.iloc[:, : -1]
y = w.iloc[:, -1].astype('category')
clf = AdaBoostClassifier(n_estimators=100,random_state=0)
pred,cfm,error=SCCV(X,y,clf,Z=10,seed=1010)

print("confusion matrix\n",cfm,"\nerror =", error)
w=pd.read_csv("derm.csv")
X = w.iloc[:, : -1]
y = w.iloc[:, -1].astype('category')
de_rf = RandomForestClassifier(n_estimators=500, oob_score=True)
de_rf.fit(X, y)
print('OOB error =',1-de_rf.oob_score_)
Imp=dict(zip(X.columns,de_rf.feature_importances_))
BarPlot(Imp,'Importance','Variable','Importance Plot',
        size=[20,20,30,10,15])
w=pd.read_csv("Concrete.csv")
y=w['Compressive.strength']
X=w.iloc[:,:-1]
con_rf = RandomForestRegressor(n_estimators=500,random_state=1010,
                               oob_score=True)
con_rf.fit(X, y)
print('OOB NMSE =',1-con_rf.oob_score_)
Imp=dict(zip(X.columns,con_rf.feature_importances_))
BarPlot(Imp,'Importance','Variable','Importance Plot')
w=pd.read_csv("pendigits.csv")
X = w.iloc[:, : -1]
y = w.iloc[:, -1].astype('category')

clf = SVC(kernel='linear', C=0.025,random_state=0)
pred,cfm,error=SCCV(X,y,clf,Z=10,seed=1010)
print("confusion matrix\n",cfm,"\nerror =", error)
w=pd.read_csv("boston.csv")
y=w['MEDV']
X=w.iloc[:,:-1]

reg = SVR(gamma='auto')
NMSE, pred=SRCVSTD(X,y,reg)
NMSE
w=pd.read_csv("boston.csv")
y=w['MEDV']
X=w.iloc[:,:-1]
reg=LinearRegression()
reg.fit(X, y)
reg.coef_,reg.intercept_
reg=LinearRegression()
NMSE, pred=SRCV(X,y,reg)
NMSE
w=pd.read_csv("boston.csv")
X=w.iloc[:,:-1]
y=w.iloc[:,-1]

reg=LinearRegression(fit_intercept=False)

beta1=[]
for x_name in X.columns:
    x=np.array(X[x_name]).reshape(-1,1)
    reg.fit(x,y)
    beta1.extend(reg.coef_)

reg.fit(X,y)
betam=reg.coef_

fig = plt.figure(figsize=(20, 6))
ax = fig.add_subplot(111)
ax.barh(X.columns, beta1, height=0.4, align='edge',
        label='Simple')
ax.barh(X.columns, betam, height=0.4, label='Multiple')
ax.legend()
ax.set_title('The differences of coefficients between multiple\
 and simple regression')
ax.set_xlabel('Coefficient')
ax.set_ylabel('Models')
w=pd.read_csv("Bidding.csv").iloc[:,3:]
X = w.iloc[:, : -1]
y = w.iloc[:, -1].astype('category')
clf = LogisticRegression(solver="liblinear")
pred,cfm,error=SCCV(X,y,clf,Z=10,seed=1010)

print("confusion matrix\n",cfm,"\nerror =", error)
w=pd.read_csv("pendigits.csv")
X = w.iloc[:, : -1]
y = w.iloc[:, -1].astype('category')
clf = LinearDiscriminantAnalysis()
pred,cfm,error=SCCV(X,y,clf,Z=10,seed=1010)
print("confusion matrix\n",cfm,"\nerror =", error)
def Fold(u,Z=5,seed=1010):
    u = np.array(u).reshape(-1)
    id = np.arange(len(u))
    zid = []; ID = []; np.random.seed(seed)
    for i in np.unique(u):
        n = sum(u==i)
        ID.extend(id[u==i])
        k = (list(range(Z))*int(n/Z+1))[:n]
        np.random.shuffle(k)
        zid.extend(k)
    zid = np.array(zid);ID = np.array(ID)
    zid = zid[np.argsort(ID)]
    return zid
def Rfold(n, Z, seed):
    zid = (list(range(Z))*int(n/Z+1))[:n]
    np.random.seed(seed)
    np.random.shuffle(zid)
    return(np.array(zid))
def ClaCV(X,y,CLS, Z=10,seed=8888, trace=True):
    from datetime import datetime
    n=len(y)
    Zid=Fold(y,Z,seed=seed)
    YCPred=dict();
    A=dict()
    for i in CLS:
        if trace: print(i,'\n',datetime.now())
        Y_pred=np.zeros(n)
        for j in range(Z):
            clf=CLS[i]
            clf.fit(X[Zid!=j],y[Zid!=j])
            Y_pred[Zid==j]=clf.predict(X[Zid==j])
        YCPred[i]=Y_pred 
        A[i]=np.mean(y!=YCPred[i])
    if trace: print(datetime.now())  
    R=pd.DataFrame(YCPred)
    return R, A
def RegCV(X,y,regress, Z=10, seed=8888, trace=True, u=[1]):
    from datetime import datetime
    n=len(y)
    if len(u)>1: zid=Fold(u,Z,seed)
    else: zid=Rfold(n,Z,seed)
    YPred=dict();
    M=np.sum((y-np.mean(y))**2)
    A=dict()
    for i in regress:
        if trace: print(i,'\n',datetime.now())
        Y_pred=np.zeros(n)
        for j in range(Z):
            reg=regress[i]
            reg.fit(X[zid!=j],y[zid!=j])
            Y_pred[zid==j]=reg.predict(X[zid==j])
        YPred[i]=Y_pred 
        A[i]=np.sum((y-YPred[i])**2)/M
    if trace: print(datetime.now())
    R=pd.DataFrame(YPred)    
    return R,A
def BarPlot(A,xlab='',ylab='',title='',size=[20,20,30,20,15]):
    import matplotlib.pyplot as plt
    plt.figure(figsize = (20,7))
    plt.barh(range(len(A)), list(A.values()), color = 'navy')#, height = 0.6)
    plt.xlabel(xlab,size=size[0])
    plt.ylabel(ylab,size=size[1])
    plt.title(title,size=size[2]) 
    plt.yticks(np.arange(len(A)),A.keys(),size=size[3])
    for v,u in enumerate(A.values()): 
        plt.text(u, v, str(round(u,4)), va = 'center',color='navy',
          size=size[4])
    plt.show()
def SRCV(X,y,REG,Z=10,seed=1010):
    n=len(y)
    zid=Rfold(n,Z,seed) 
    pred=np.zeros(n)
    for j in range(Z):
        REG.fit(X[zid!=j],y[zid!=j])
        pred[zid==j]=REG.predict(X[zid==j]) 
    NMSE=((y-pred)**2).sum()/np.sum((y-y.mean())**2)
    return NMSE, pred
def SRCVSTD(X,y,REG,Z=10,seed=1010):
    from sklearn.preprocessing import StandardScaler
    n=len(y)
    zid=Rfold(n,Z,seed) 
    X_std=StandardScaler().fit_transform(X)
    stdy=StandardScaler()
    y_std=stdy.fit_transform(y.values.reshape(-1, 1))
    pred=np.zeros(n)
    for j in range(Z):
        REG.fit(X_std[zid!=j],y_std.ravel()[zid!=j])
        pred[zid==j]=stdy.inverse_transform(
            REG.predict(X_std[zid==j]).reshape(-1,1)).ravel()
    NMSE=((y-pred)**2).sum()/np.sum((y-y.mean())**2)
    return NMSE, pred
def SCCV(X,y,clf,Z=10,seed=1010):
    n=len(y)
    Zid=Fold(y,Z,seed=seed)
    Y_pred=np.zeros(n)
    for j in range(Z):
        clf.fit(X[Zid!=j],y[Zid!=j])
        Y_pred[Zid==j]=clf.predict(X[Zid==j])
    cfm=confusion_matrix(y,Y_pred)
    error =np.mean(y!=Y_pred)
    return Y_pred, cfm, error
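
Note (not from the book): a minimal self-contained check of the helpers above, running SCCV on sklearn's built-in iris data:
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
iris=load_iris()
X=pd.DataFrame(iris.data,columns=iris.feature_names)
y=pd.Series(iris.target)
pred,cfm,error=SCCV(X,y,DecisionTreeClassifier(),Z=10,seed=1010)
print("confusion matrix\n",cfm,"\nerror =",error)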

Chapter 6

w=read.table("who.txt",sep=",",header=T)
b=eigen(cor(w)) # eigendecomposition of the correlation matrix cor(w)
t(data.frame(b$va,b$va/sum(b$va),cumsum(b$va)/sum(b$va))) 
par(mfrow=c(1,2))
plot(b$va, type="o", 
     main="Scree Plot",
     xlab="Component Number",
     ylab="Eigen Value")
plot(cumsum(b$va)/sum(b$va),type="o",
     main="Cumulative Eigen Value (Ratio)",
     xlab="Component Number", 
     ylab="Cumulative Eigen Value (Ratio)")
(loadings=sweep(b$ve,2,sqrt(b$va),"*"))
plot(b$ve[,1:2],type="n",main="Loading Plot",
xlab="Component 1",ylab="Component 2")
abline(h=0);abline(v=0);text(b$ve[,1:2],names(w))
w1=as.matrix(scale(w))
plot(w1%*%b$ve[,1:2],type="n",xlab="Comp 1",ylab="Comp 2")
text(w1%*%b$ve[,1:2],row.names(w),cex=0.5)
w=read.table("who.txt",sep=",",header=T)
a=factanal(w,2,scores = "regression");a$loadings
w=read.table("trans.txt",header=T)
set.seed(44);a=kmeans(w,5)
w1=w[a$clus!=1,];hh=hclust(dist(w1), "ave")
plot(hh,labels=row.names(w1) ,xlab="Country or Area")
w=read.table("cities0.txt",sep=",",header=T)
hh=hclust(dist(w), "ave");
plot(hh,labels=row.names(w),cex=0.8);a=identify(hh)
w=read.table("tv.txt",header=T)
X=w[,1:3];Y=w[,4:6]
library(CCA)
(res=cc(X,Y))
res$scores$corr.X.xscores
res$scores$corr.Y.xscores
res$scores$corr.X.yscores
res$scores$corr.Y.yscores
w=read.table("HEcolor.txt",header=T)
ftable(xtabs(Freq~.,w), row.vars=1,col.var=2:3)
w=read.table("HEcolor.txt",header=T)
w1=xtabs(Freq~Hair+Eye,w)
library(MASS);(a=corresp(w1, nf=2))
biplot(a,xlim=c(-1,1))
# import packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns  # needed for sns.clustermap below
import prince

from sklearn.preprocessing import StandardScaler
from factor_analyzer import FactorAnalyzer
from sklearn.cluster import KMeans, AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram
from sklearn.cross_decomposition import CCA
from scipy.stats import pearsonr

import matplotlib as tplt
import sklearn as tsk
import scipy as tsc
import factor_analyzer as tfa

import os
os.chdir('E:/')
who = pd.read_csv('who.txt')
who

eigen_val, eigen_vec = np.linalg.eig(np.corrcoef(who.T))
eigen_val_ratio = eigen_val / np.sum(eigen_val)
eigen_val_ratio_cusum = np.cumsum(eigen_val_ratio)
eigen = pd.DataFrame({'Eigenvalue': eigen_val, 
                      'Proportion': eigen_val_ratio, 
                      'Cumulative proportion': eigen_val_ratio_cusum})
print(eigen)

fig = plt.figure(figsize=(15, 3))
ax1 = fig.add_subplot(1, 2, 1)
ax1.plot(eigen_val)
ax1.scatter(range(len(eigen_val)), eigen_val)
ax1.set_title('Scree Plot')
ax1.set_xlabel('Component Number')
ax1.set_ylabel('Eigen Value')

ax2 = fig.add_subplot(1, 2, 2)
ax2.plot(eigen_val_ratio_cusum)
ax2.scatter(range(len(eigen_val_ratio_cusum)), eigen_val_ratio_cusum)
ax2.set_title('Cumulative Eigen Value (Ratio)')
ax2.set_xlabel('Component Number')
ax2.set_ylabel('Cumulative Eigen Value (Ratio)')

component_matrix = pd.DataFrame(np.round(-eigen_vec * 
                                         np.sqrt(eigen_val), 2))
component_matrix.index = [f'x{i}' for i in range(1, len(eigen_val) + 1)]
component_matrix.columns = list(range(1, len(eigen_val) + 1))
print(component_matrix)
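
Note (not from the book): a small check using component_matrix from above; with all components kept, each variable's squared loadings sum to 1 (the diagonal of the correlation matrix), up to the rounding applied above:
print((component_matrix ** 2).sum(axis=1))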

fig = plt.figure(figsize=(15, 5))
ax1 = fig.add_subplot(1, 2, 1)
ax1.scatter(component_matrix.iloc[:, 0], 
            component_matrix.iloc[:, 1],s=5)
for x, y, z in zip(component_matrix.iloc[:, 0], 
                   component_matrix.iloc[:, 1],
                   component_matrix.index):
    ax1.text(x, y, z)
ax1.set_title('Loading Plot')
ax1.set_xlabel('Component 1')
ax1.set_ylabel('Component 2')
ax1.hlines(0, -1, 1)
ax1.vlines(0, -0.65, 0.4)

who_std = StandardScaler().fit_transform(who)
score = who_std.dot(component_matrix.iloc[:, :2])
ax2 = fig.add_subplot(1, 2, 2)
ax2.scatter(score[:, 0], score[:, 1], s=5)
ax2.set_title('Score Plot')
ax2.set_xlabel('Comp 1')
ax2.set_ylabel('Comp 2')
for x, y, z in zip(score[:, 0], score[:, 1], who.index):
    ax2.text(x, y, z)
fa = FactorAnalyzer(n_factors=2, rotation='varimax', method='ml')
fa.fit(who)
factor = pd.DataFrame(fa.loadings_)
factor.columns = ['Factor1', 'Factor2']
factor.index = [f'x{i}' for i in range(1, len(fa.loadings_) + 1)]
print(factor)

fig = plt.figure(figsize=(15, 5))
ax1 = fig.add_subplot(1, 2, 1)
ax1.scatter(factor.iloc[:, 0], factor.iloc[:, 1], s=5)
for x, y, z in zip(factor.iloc[:, 0], factor.iloc[:, 1], factor.index):
    ax1.text(x, y, z)
ax1.set_title('Loading Plot')
ax1.set_xlabel('Factor 1')
ax1.set_ylabel('Factor 2')

score = fa.transform(who)
ax2 = fig.add_subplot(1, 2, 2)
ax2.scatter(score[:, 0], score[:, 1], s=5)
ax2.set_title('Score Plot')
ax2.set_xlabel('Factor 1')
ax2.set_ylabel('Factor 2')
for x, y, z in zip(score[:, 0], score[:, 1], who.index):
    ax2.text(x, y, z)
trans = pd.read_csv('trans.txt', sep='\s+')
sns.clustermap(trans, metric="correlation", method="complete", 
               cmap="Blues",figsize=(20, 6.5), standard_scale=1)
from scipy.cluster.hierarchy import dendrogram, linkage,\
 set_link_color_palette
Z= linkage(trans, method='average', metric='euclidean')
set_link_color_palette(['m', 'c', 'y', 'k'])
fig = plt.figure(figsize=(18, 6), dpi=72)
dendrogram(Z, labels=trans.index, leaf_rotation=0, orientation="left",
leaf_font_size=6,color_threshold=None,above_threshold_color='blue')
set_link_color_palette(None)  
np.random.seed(8888)
k=6
from scipy.cluster.vq import kmeans2
cent,label=kmeans2(trans, k, iter=20, thresh=1e-05,
                    minit='random', missing='warn', check_finite=True)
print("centroid:\n",cent)
for i in range(k):
    print(trans.index[label==i])
tv = pd.read_csv('tv.txt', sep='\s+')

X = tv.iloc[:, : 3]
Y = tv.iloc[:, 3:]
cca = CCA(n_components=3, scale=False)
cca.fit(X, Y)

print("cca.x_weights:\n",cca.x_weights_)
print("cca.y_weights:\n",cca.y_weights_)
print("cca.x_loadings:\n",cca.x_loadings_)
print("cca.y_loadings:\n",cca.y_loadings_)
print("cca.x_scores:\n",cca.x_scores_)#不展示
print("cca.y_scores:\n",cca.y_scores_)#不展示
fig=plt.figure(figsize=(10,3)) 
for i in range(3):
    fig.add_subplot(1,3,i+1)
    plt.scatter(cca.x_scores_[:,i],cca.y_scores_[:,i]) 
plt.show()
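
Note (not from the book): pearsonr, imported at the top of this chapter, gives the canonical correlations for the score pairs just plotted (using the cca fit above):
from scipy.stats import pearsonr
for i in range(3):
    r,_=pearsonr(cca.x_scores_[:,i],cca.y_scores_[:,i])
    print('canonical correlation',i+1,':',round(r,4))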
HEcolor = pd.read_csv('HEcolor.txt', sep='\s+')
HEcolor
pivot_table = pd.pivot_table(HEcolor, 
                             values='Freq', index='Hair', 
                             columns='Eye', aggfunc=np.sum)
print(pivot_table)
ca = prince.CA()
ca.fit(pivot_table)
print(ca.row_coordinates(pivot_table))
print(ca.column_coordinates(pivot_table))
ca.plot_coordinates(pivot_table)

Chapter 7

x=scan("tax.txt")
tax=ts(x, frequency = 12, start = c(1995, 1))
ts.plot(tax,ylab="Tax")
a=stl(tax, "period") # decompose the series
par(mfrow=c(1,3))
plot(tax-a$time.series[,1],ylab="",main="Without Seasonal")
ts.plot(a$time.series[,1:2],main="Trend and error")
ts.plot(a$time.series[,2:3],main="Trend and Seasonal")
b=HoltWinters(tax,beta=0);tax.p=predict(b,n.ahead=12)
ts.plot(tax,xlim=c(1995,2006.5));lines(tax.p,col=1,lty=3)
x=scan("ar2.txt")
layout(matrix(c(1,1,2,3),nr=2,byrow=T))
ts.plot(x);acf(x);pacf(x)
set.seed(1010)
x1=arima.sim(list(c(2,0,0),ar=c(0.3,-0.6)),n = 200)
x2=arima.sim(list(c(0,0,2),ma=c(-0.3,-0.4)),n = 200)
x3=arima.sim(list(c(2,0,2),ar=c(.3,.6),ma=c(.5,.2)),n=200)
par(mfcol=c(2,3))
acf(x1,main="Acf of AR(2) Series")
pacf(x1,main="Pacf of AR(2) Series")
acf(x2,main="Acf of MA(2) Series")
pacf(x2,main="Pacf of MA(2) Series")
acf(x3,main="Acf of ARMA(2,2) Series")
pacf(x3,main="Pacf of ARMA(2,2) Series")
x=scan("ar2.txt")
(d=arima(x,c(2,0,0),include.mean=F))
pr=predict(d,50)
ts.plot(x)
lines(pr$pred,lty=2)
layout(t(1:3))
pacf(d$residuals)
acf(d$residuals)
plot(d$residuals,type = 'l')
x=scan("tax.txt")
tax=ts(x, frequency = 12, start = c(1995, 1))
par(mfrow=c(1,2));acf(tax);pacf(tax)
(a=arima(tax,c(0,1,1),c(1,2,1)))
library(forecast)
fit <- Arima(tax,c(0,1,1),c(1,2,1))
plot(forecast(fit,h=12))
par(mfrow=c(1,2));acf(fit$res);pacf(fit$res)
a=arima(tax,c(0,1,1),c(1,2,1)) # repeat the earlier fit
B=NULL;for( i in 1:100)
B=c(B,Box.test(a$res,lag=i,type="Ljung-Box")$p.value)
plot(B,main="Ljung-Box tests", ylab="p-value",
xlab="lag",pch=16,ylim=c(0,1));abline(h=.05,lty=2)
library(forecast)
(a1=auto.arima(tax))
# import packages
import matplotlib.pyplot as plt
import statsmodels.api as sm
import numpy as np
import pandas as pd

from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()

import matplotlib as tplt
import statsmodels as tsm
import os
os.chdir('E:/')
tax = pd.read_csv('tax.txt', sep='\s+', header=None)
tax.index = pd.date_range('1-1995', periods=len(tax), freq='M')
tax.plot(figsize=(10,3), legend=False)
stl = seasonal_decompose(tax, freq=12)

fig = plt.figure(figsize=(21, 8))
ax1 = fig.add_subplot(1, 3, 1)
ax1.set_title('Time Series Without Seasonal')
ax1.set_xlabel('Time')
(stl.observed - stl.seasonal).plot(ax=ax1)

ax2 = fig.add_subplot(1, 3, 2)
ax2.set_title('Trend and seasonal')
ax2.set_xlabel('Time')
stl.trend.plot(ax=ax2)
stl.seasonal.plot(ax=ax2)

ax3 = fig.add_subplot(1, 3, 3)
ax3.set_title('Trend and error')
ax3.set_xlabel('Time')
stl.trend.plot(ax=ax3)
stl.resid.plot(ax=ax3)
holt = ExponentialSmoothing(tax, trend='add', 
                            seasonal='add', seasonal_periods=12)
holt_fit = holt.fit()

preds = holt_fit.predict(start=len(tax), end=len(tax) + 11)
preds = pd.Series(preds, 
                  index=pd.date_range('8-2005', 
                                      periods=len(preds), freq='M'))
fig = plt.figure(figsize=(15,4))
ax = fig.add_subplot(111)
preds.plot(ax=ax)
tax.plot(ax=ax)
ar2 = pd.read_csv('ar2.txt', header=None)
fig = plt.figure(figsize=(10, 6))
ax1 = fig.add_subplot(2, 1, 1)
ax1.set_xlabel('Time')
ax1.set_ylabel('x')
ax1.plot(ar2)

ax2 = fig.add_subplot(2, 2, 3)
_ = sm.graphics.tsa.plot_acf(ar2, lags=26, ax=ax2)

ax3 = fig.add_subplot(2, 2, 4)
_ = sm.graphics.tsa.plot_pacf(ar2, lags=26, ax=ax3)
ar_process = sm.tsa.ArmaProcess(ar=np.array([1, -0.3, 0.6]))
ma_process = sm.tsa.ArmaProcess(ma=np.array([1, -0.3, -0.4]))
arma_process = sm.tsa.ArmaProcess(ar=np.array([1, -0.3, -0.6]),
                                  ma=np.array([1, 0.5, 0.2]))

x1 = ar_process.generate_sample(200)
x2 = ma_process.generate_sample(200)
x3 = arma_process.generate_sample(200)

fig = plt.figure(figsize=(15, 7))
ax1 = fig.add_subplot(231)
_ = sm.graphics.tsa.plot_acf(x1, ax=ax1, lags=25)
ax1.set_title('Acf of AR(2) Series')

ax2 = fig.add_subplot(232)
_ = sm.graphics.tsa.plot_pacf(x1, ax=ax2, lags=25)
ax2.set_title('Pacf of AR(2) Series')

ax3 = fig.add_subplot(233)
_ = sm.graphics.tsa.plot_acf(x2, ax=ax3, lags=25)
ax3.set_title('Acf of MA(2) Series')

ax4 = fig.add_subplot(234)
_ = sm.graphics.tsa.plot_pacf(x2, ax=ax4, lags=25)
ax4.set_title('Pacf of MA(2) Series')

ax5 = fig.add_subplot(235)
_ = sm.graphics.tsa.plot_acf(x3, ax=ax5, lags=25)
ax5.set_title('Acf of ARMA(2, 2) Series')

ax6 = fig.add_subplot(236)
_ = sm.graphics.tsa.plot_pacf(x3, ax=ax6, lags=25)
ax6.set_title('Pacf of ARMA(2, 2) Series')
ar2_model = sm.tsa.statespace.SARIMAX(ar2,order=(2,0,0), 
                                      trend='n').fit(disp=False)
print(ar2_model.params)

fig = plt.figure(figsize=(15, 4))
ax1 = fig.add_subplot(1, 3, 1)
_ = sm.graphics.tsa.plot_acf(ar2_model.resid, lags=25, ax=ax1)

ax2 = fig.add_subplot(1, 3, 2)
_ = sm.graphics.tsa.plot_pacf(ar2_model.resid, lags=25, ax=ax2)

ax3 = fig.add_subplot(1, 3, 3)
ax3.plot(ar2_model.resid)
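
Note (not from the book): mirroring the R call predict(d,50), the fitted SARIMAX model can forecast ahead; a minimal sketch using ar2 and ar2_model from above:
fc=ar2_model.get_forecast(steps=50) # 50-step-ahead forecast
plt.figure(figsize=(10,3))
plt.plot(ar2,color='black')
plt.plot(fc.predicted_mean,linestyle='--')
plt.show()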

Chapter 8

w=read.table("poison.txt",header=T) # read in the data
library(survival);k=rep(1,48)
par(mfrow=c(1,3))
fit0=survfit(Surv(Time,k)~1,data=w)
plot(fit0,xlab="Time",ylab="Survival Function")
title("(a) All Cases")
fit1=survfit(Surv(Time,k)~Treatment,data=w)
plot(fit1,xlab="Time",ylab="Survival Function",lty=1:4)
title("(b) By Treatment")
legend("topright",paste("Treatment-",1:4),lty=1:4)
fit2=survfit(Surv(Time,k)~Poison,data=w)
plot(fit2,xlab="Time",ylab="Survival Function",lty=1:3)
title("(c) By Poison")
legend("topright",paste("Poison-",1:3),lty=1:3)
u=read.csv("pharynx1.txt",sep=",") # read in the data
library(survival)
fit0=survfit(Surv(TIME, STATUS) ~ 1, data=u ,type="kaplan-meier")
fit=survfit(Surv(TIME, STATUS) ~ TX, data=u ,type="kaplan-meier")
par(mfrow=c(1,2));
plot(fit0,conf.int = F,mark.time=T,xlab="Time",ylab="Survival Function")
title("All Data")
plot(fit,lty=1:2,mark.time=T,xlab="Time",ylab="Survival Function")
title("Comparison");legend("topright",c("TX=1","TX=2"),lty=1:2)
w=read.table("poison.txt",header=T)
for(i in 1:2) w[,i]=factor(w[,i])
k=rep(1,48)
fit=coxph(Surv(Time,k)~.,data=w)
summary(fit)
bh=basehaz(fit)
plot(hazard~time,bh,type="l")
# import packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from lifelines import KaplanMeierFitter, CoxPHFitter

import matplotlib as tplt
import lifelines as tfl

import os
os.chdir('E:/')
poison = pd.read_csv('poison.txt', sep='\s+')
poison

fig = plt.figure(figsize=(20, 4))

ax1 = fig.add_subplot(1, 3, 1)
all_kmf = KaplanMeierFitter()
all_kmf.fit(poison['Time'])
all_kmf.plot(ax=ax1, label='All Cases')
ax1.set_ylabel('Survival Function')
ax1.set_title('All Cases')

ax2 = fig.add_subplot(1, 3, 2)
for treatment, df in poison.groupby('Treatment'):
    treatment_kmf = KaplanMeierFitter()
    treatment_kmf.fit(df['Time'])
    treatment_kmf.plot(ax=ax2, label=f'Treatment-{treatment}')
ax2.set_title('By Treatment')

ax3 = fig.add_subplot(1, 3, 3)
for poi, df in poison.groupby('Poison'):
    poison_kmf = KaplanMeierFitter()
    poison_kmf.fit(df['Time'])
    poison_kmf.plot(ax=ax3, label=f'Poison-{poi}')
ax3.set_title('By Poison')
pharynx1 = pd.read_csv('pharynx1.txt')
pharynx1

fig = plt.figure(figsize=(15, 4))

ax1 = fig.add_subplot(1, 2, 1)
all_kmf = KaplanMeierFitter()
all_kmf.fit(pharynx1['TIME'])
all_kmf.plot(ax=ax1, label='All Cases')
ax1.set_ylabel('Survival Function')
ax1.set_title('All Data')

ax2 = fig.add_subplot(1, 2, 2)
for TX, df in pharynx1.groupby('TX'):
    tx_kmf = KaplanMeierFitter()
    tx_kmf.fit(df['TIME'])
    tx_kmf.plot(ax=ax2, label=f'TX-{TX}')
ax2.set_title('Comparison')
poison = pd.read_csv('poison.txt', sep='\s+')

df = pd.DataFrame(poison['Time'])
df = df.join(pd.get_dummies(poison['Poison'], prefix='Poison', 
                            prefix_sep='').iloc[:, 1:])
df = df.join(pd.get_dummies(poison['Treatment'], prefix='Treatment', 
                            prefix_sep='').iloc[:, 1:])
cox_model = CoxPHFitter()
cox_model.fit(df, 'Time')
cox_model.print_summary()

cox_model.predict_cumulative_hazard(df)[0].plot(figsize=(15,4))
plt.ylabel('hazard')
plt.xlabel('time')
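
Note (not from the book): the fitted Cox model can also produce survival curves via lifelines' predict_survival_function; a minimal sketch for the first case, using cox_model and df from above:
surv=cox_model.predict_survival_function(df)
surv[0].plot(figsize=(15,4)) # survival curve of the first animal
plt.ylabel('survival probability')
plt.xlabel('time')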