Source: https://github.com/cran/gstat (gstat version 2.0-9)
Tip revision: ce494d93a27106ca691c205c35c60843717717fa, authored by Edzer Pebesma on 14 March 2022, 14:20:05 UTC
File: tests/allier.Rout.save (expected R console output for R CMD check)
R version 3.6.2 (2019-12-12) -- "Dark and Stormy Night"
Copyright (C) 2019 The R Foundation for Statistical Computing
Platform: x86_64-pc-linux-gnu (64-bit)
R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.
R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.
Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.
> # Sytze de Bruin's post to r-sig-geo, Nov 16, 2015:
> library(sp)
> library(gstat)
>
> # some data
> x <- c(215, 330, 410, 470, 545)
> y <- c(230, 310, 330, 340, 365)
> fc <- c(0.211, 0.251, 0.281, 0.262, 0.242)
> por <- c(0.438, 0.457, 0.419, 0.430, 0.468)
> Allier <- data.frame(x, y, fc, por)
> coordinates(Allier) = ~x+y
>
> # gstat object for co-kriging using linear model of co-regionalization
> g <- gstat(id=c("fc"), formula=fc~1, data=Allier,
+ model=vgm(0.00247, "Sph", 480, 0.00166))
> g <- gstat(g, id="por", formula=por~1, data=Allier,
+ model=vgm(0.00239, "Sph", 480, 0.00118))
> g <- gstat(g, id=c("fc", "por"),
+ model=vgm(0.00151, "Sph", 480, -0.00124))
>
> # predict at single point
> g$set = list(choleski = 0) # LDL'
> A <- predict(g, SpatialPoints(data.frame(x=450, y=350)), debug = 32)
Linear Model of Coregionalization found. Good.
[using ordinary cokriging]
we're at location X: 450 Y: 350 Z: 0
zero block size
we're at point X: 450 Y: 350 Z: 0
# X:
Matrix: 10 by 2
rbind(
c( 1.000000, 0.000000), # row 1
c( 1.000000, 0.000000), # row 2
c( 1.000000, 0.000000), # row 3
c( 1.000000, 0.000000), # row 4
c( 1.000000, 0.000000), # row 5
c( 0.000000, 1.000000), # row 6
c( 0.000000, 1.000000), # row 7
c( 0.000000, 1.000000), # row 8
c( 0.000000, 1.000000), # row 9
c( 0.000000, 1.000000) # row 10
)
[using generalized covariances: max_val - semivariance()]
# Covariances (x_i, x_j) matrix C (upper triangle):
Matrix: 10 by 10
rbind(
c( 0.004130, 0.001419, 0.000896, 0.000566, 0.000224, 0.000270, 0.000868, 0.000548, 0.000346, 0.000137), # row 1
c( 0.001419, 0.004130, 0.001840, 0.001398, 0.000879, 0.000868, 0.000270, 0.001125, 0.000854, 0.000537), # row 2
c( 0.000896, 0.001840, 0.004130, 0.002003, 0.001424, 0.000548, 0.001125, 0.000270, 0.001225, 0.000870), # row 3
c( 0.000566, 0.001398, 0.002003, 0.004130, 0.001865, 0.000346, 0.000854, 0.001225, 0.000270, 0.001140), # row 4
c( 0.000224, 0.000879, 0.001424, 0.001865, 0.004130, 0.000137, 0.000537, 0.000870, 0.001140, 0.000270), # row 5
c( 0.000270, 0.000868, 0.000548, 0.000346, 0.000137, 0.003570, 0.001373, 0.000867, 0.000547, 0.000217), # row 6
c( 0.000868, 0.000270, 0.001125, 0.000854, 0.000537, 0.001373, 0.003570, 0.001780, 0.001352, 0.000851), # row 7
c( 0.000548, 0.001125, 0.000270, 0.001225, 0.000870, 0.000867, 0.001780, 0.003570, 0.001938, 0.001378), # row 8
c( 0.000346, 0.000854, 0.001225, 0.000270, 0.001140, 0.000547, 0.001352, 0.001938, 0.003570, 0.001805), # row 9
c( 0.000137, 0.000537, 0.000870, 0.001140, 0.000270, 0.000217, 0.000851, 0.001378, 0.001805, 0.003570) # row 10
)
# glm->C, LDL' decomposed::
Matrix: 10 by 10
rbind(
c( 0.003399, 0.372269, 0.208612, 0.127967, 0.016746, -0.025847, 0.223862, 0.148767, 0.104047, 0.038371), # row 1
c( 0.000000, 0.002661, 0.483725, 0.327484, 0.153742, 0.233021, -0.125118, 0.262410, 0.219271, 0.150536), # row 2
c( 0.000000, 0.000000, 0.002276, 0.540203, 0.303585, 0.058071, 0.314376, -0.175899, 0.295181, 0.243817), # row 3
c( 0.000000, 0.000000, 0.000000, 0.002484, 0.483068, 0.000301, 0.115816, 0.377362, -0.115338, 0.319418), # row 4
c( 0.000000, 0.000000, 0.000000, 0.000000, 0.003690, -0.038142, 0.000474, 0.120915, 0.377730, 0.075630), # row 5
c( 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.003019, 0.354325, 0.235465, 0.164684, 0.060733), # row 6
c( 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.002623, 0.415337, 0.347059, 0.238266), # row 7
c( 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.002458, 0.467207, 0.385909), # row 8
c( 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.002658, 0.505569), # row 9
c( 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.003570) # row 10
)
# X'C-1 X:
Matrix: 2 by 2
rbind(
c(665.433994, -232.313340), # row 1
c(-232.313340, 729.440848) # row 2
)
# beta:
Vector: dim: 2
c( 0.244216, 0.440933)
# Cov(beta), (X'C-1 X)-1:
Matrix: 2 by 2
rbind(
c( 0.001691, 0.000538), # row 1
c( 0.000538, 0.001542) # row 2
)
# Corr(beta):
Matrix: 2 by 2
rbind(
c( 1.000000, 0.333447), # row 1
c( 0.333447, 1.000000) # row 2
)
# X0 (X values at prediction location x0):
Matrix: 2 by 2
rbind(
c( 1.000000, 0.000000), # row 1
c( 0.000000, 1.000000) # row 2
)
# BLUE(mu), E(y(x0)) = X0'beta:
Vector: dim: 2
c( 0.244216, 0.440933)
# Covariances (x_i, x_0), C0:
Matrix: 10 by 2
rbind(
c( 0.000638, 0.000390), # row 1
c( 0.001516, 0.000927), # row 2
c( 0.002126, 0.001300), # row 3
c( 0.002298, 0.001405), # row 4
c( 0.001738, 0.001062), # row 5
c( 0.000390, 0.000618), # row 6
c( 0.000927, 0.001467), # row 7
c( 0.001300, 0.002057), # row 8
c( 0.001405, 0.002223), # row 9
c( 0.001062, 0.001681) # row 10
)
# C-1 C0:
Matrix: 10 by 2
rbind(
c( 0.008927, -0.017995), # row 1
c( 0.061935, -0.028550), # row 2
c( 0.225195, 0.078344), # row 3
c( 0.356735, 0.190177), # row 4
c( 0.094128, -0.023136), # row 5
c(-0.021119, 0.004935), # row 6
c(-0.033508, 0.055601), # row 7
c( 0.091947, 0.242576), # row 8
c( 0.223200, 0.398927), # row 9
c(-0.027153, 0.088995) # row 10
)
# [a] Cov_ij(B,B) or Cov_ij(0,0):
Matrix: 2 by 2
rbind(
c( 0.004130, 0.000270), # row 1
c( 0.000270, 0.003570) # row 2
)
# [c] (x0-X'C-1 c0)'(X'C-1 X)-1(x0-X'C-1 c0):
Matrix: 2 by 2
rbind(
c( 0.000129, -0.000107), # row 1
c(-0.000107, 0.000089) # row 2
)
# [b] c0'C-1 c0:
Matrix: 2 by 2
rbind(
c( 0.001926, 0.001532), # row 1
c( 0.001532, 0.001931) # row 2
)
# Best Linear Unbiased Predictor:
Vector: dim: 2
c( 0.253090, 0.441258)
# MSPE ([a]-[b]+[c]):
Matrix: 2 by 2
rbind(
c( 0.002332, -0.001369), # row 1
c(-0.001369, 0.001729) # row 2
)
# kriging weights:
Matrix: 10 by 2
rbind(
c( 0.071177, -0.064988), # row 1
c( 0.108451, -0.066123), # row 2
c( 0.268160, 0.043141), # row 3
c( 0.401959, 0.153312), # row 4
c( 0.150253, -0.065342), # row 5
c(-0.076273, 0.056759), # row 6
c(-0.077605, 0.093781), # row 7
c( 0.050632, 0.277731), # row 8
c( 0.179933, 0.435972), # row 9
c(-0.076688, 0.135756) # row 10
)
> g$set = list(choleski = 1) # Choleski
> B <- predict(g, SpatialPoints(data.frame(x=450, y=350)), debug = 32)
Linear Model of Coregionalization found. Good.
[using ordinary cokriging]
we're at location X: 450 Y: 350 Z: 0
zero block size
we're at point X: 450 Y: 350 Z: 0
# X:
Matrix: 10 by 2
rbind(
c( 1.000000, 0.000000), # row 1
c( 1.000000, 0.000000), # row 2
c( 1.000000, 0.000000), # row 3
c( 1.000000, 0.000000), # row 4
c( 1.000000, 0.000000), # row 5
c( 0.000000, 1.000000), # row 6
c( 0.000000, 1.000000), # row 7
c( 0.000000, 1.000000), # row 8
c( 0.000000, 1.000000), # row 9
c( 0.000000, 1.000000) # row 10
)
[using generalized covariances: max_val - semivariance()]
# Covariances (x_i, x_j) matrix C (upper triangle):
Matrix: 10 by 10
rbind(
c( 0.004130, 0.001419, 0.000896, 0.000566, 0.000224, 0.000270, 0.000868, 0.000548, 0.000346, 0.000137), # row 1
c( 0.001419, 0.004130, 0.001840, 0.001398, 0.000879, 0.000868, 0.000270, 0.001125, 0.000854, 0.000537), # row 2
c( 0.000896, 0.001840, 0.004130, 0.002003, 0.001424, 0.000548, 0.001125, 0.000270, 0.001225, 0.000870), # row 3
c( 0.000566, 0.001398, 0.002003, 0.004130, 0.001865, 0.000346, 0.000854, 0.001225, 0.000270, 0.001140), # row 4
c( 0.000224, 0.000879, 0.001424, 0.001865, 0.004130, 0.000137, 0.000537, 0.000870, 0.001140, 0.000270), # row 5
c( 0.000270, 0.000868, 0.000548, 0.000346, 0.000137, 0.003570, 0.001373, 0.000867, 0.000547, 0.000217), # row 6
c( 0.000868, 0.000270, 0.001125, 0.000854, 0.000537, 0.001373, 0.003570, 0.001780, 0.001352, 0.000851), # row 7
c( 0.000548, 0.001125, 0.000270, 0.001225, 0.000870, 0.000867, 0.001780, 0.003570, 0.001938, 0.001378), # row 8
c( 0.000346, 0.000854, 0.001225, 0.000270, 0.001140, 0.000547, 0.001352, 0.001938, 0.003570, 0.001805), # row 9
c( 0.000137, 0.000537, 0.000870, 0.001140, 0.000270, 0.000217, 0.000851, 0.001378, 0.001805, 0.003570) # row 10
)
# glm->C, Choleski decomposed::
Matrix: 10 by 10
rbind(
c( 0.064265, 0.022086, 0.013942, 0.008801, 0.003487, 0.004201, 0.013502, 0.008523, 0.005380, 0.002132), # row 1
c( 0.000000, 0.060351, 0.025382, 0.019938, 0.013290, 0.012840, -0.000468, 0.015517, 0.012189, 0.008125), # row 2
c( 0.000000, 0.000000, 0.057370, 0.023954, 0.018091, 0.002846, 0.016530, -0.004230, 0.014644, 0.011059), # row 3
c( 0.000000, 0.000000, 0.000000, 0.055509, 0.020471, -0.000277, 0.006286, 0.016960, -0.006686, 0.012514), # row 4
c( 0.000000, 0.000000, 0.000000, 0.000000, 0.056523, -0.001665, 0.001218, 0.006437, 0.014711, -0.005337), # row 5
c( 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.058108, 0.022018, 0.011347, 0.006008, 0.001147), # row 6
c( 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.050877, 0.027084, 0.018381, 0.010720), # row 7
c( 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.045247, 0.026914, 0.017658), # row 8
c( 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.042645, 0.023811), # row 9
c( 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.046872) # row 10
)
# X'C-1 X:
Matrix: 2 by 2
rbind(
c(665.433994, -232.313340), # row 1
c(-232.313340, 729.440848) # row 2
)
# beta:
Vector: dim: 2
c( 0.244216, 0.440933)
# Cov(beta), (X'C-1 X)-1:
Matrix: 2 by 2
rbind(
c( 0.001691, 0.000538), # row 1
c( 0.000538, 0.001542) # row 2
)
# Corr(beta):
Matrix: 2 by 2
rbind(
c( 1.000000, 0.333447), # row 1
c( 0.333447, 1.000000) # row 2
)
# X0 (X values at prediction location x0):
Matrix: 2 by 2
rbind(
c( 1.000000, 0.000000), # row 1
c( 0.000000, 1.000000) # row 2
)
# BLUE(mu), E(y(x0)) = X0'beta:
Vector: dim: 2
c( 0.244216, 0.440933)
# Covariances (x_i, x_0), C0:
Matrix: 10 by 2
rbind(
c( 0.000638, 0.000390), # row 1
c( 0.001516, 0.000927), # row 2
c( 0.002126, 0.001300), # row 3
c( 0.002298, 0.001405), # row 4
c( 0.001738, 0.001062), # row 5
c( 0.000390, 0.000618), # row 6
c( 0.000927, 0.001467), # row 7
c( 0.001300, 0.002057), # row 8
c( 0.001405, 0.002223), # row 9
c( 0.001062, 0.001681) # row 10
)
# C-1 C0:
Matrix: 10 by 2
rbind(
c( 0.008927, -0.017995), # row 1
c( 0.061935, -0.028550), # row 2
c( 0.225195, 0.078344), # row 3
c( 0.356735, 0.190177), # row 4
c( 0.094128, -0.023136), # row 5
c(-0.021119, 0.004935), # row 6
c(-0.033508, 0.055601), # row 7
c( 0.091947, 0.242576), # row 8
c( 0.223200, 0.398927), # row 9
c(-0.027153, 0.088995) # row 10
)
# [a] Cov_ij(B,B) or Cov_ij(0,0):
Matrix: 2 by 2
rbind(
c( 0.004130, 0.000270), # row 1
c( 0.000270, 0.003570) # row 2
)
# [c] (x0-X'C-1 c0)'(X'C-1 X)-1(x0-X'C-1 c0):
Matrix: 2 by 2
rbind(
c( 0.000129, -0.000107), # row 1
c(-0.000107, 0.000089) # row 2
)
# [b] c0'C-1 c0:
Matrix: 2 by 2
rbind(
c( 0.001926, 0.001532), # row 1
c( 0.001532, 0.001931) # row 2
)
# Best Linear Unbiased Predictor:
Vector: dim: 2
c( 0.253090, 0.441258)
# MSPE ([a]-[b]+[c]):
Matrix: 2 by 2
rbind(
c( 0.002332, -0.001369), # row 1
c(-0.001369, 0.001729) # row 2
)
# kriging weights:
Matrix: 10 by 2
rbind(
c( 0.071177, -0.064988), # row 1
c( 0.108451, -0.066123), # row 2
c( 0.268160, 0.043141), # row 3
c( 0.401959, 0.153312), # row 4
c( 0.150253, -0.065342), # row 5
c(-0.076273, 0.056759), # row 6
c(-0.077605, 0.093781), # row 7
c( 0.050632, 0.277731), # row 8
c( 0.179933, 0.435972), # row 9
c(-0.076688, 0.135756) # row 10
)
> all.equal(A,B)
[1] TRUE
>
> # TRUE
>
> proc.time()
user system elapsed
0.551 0.028 0.570