Given the following matrix:
A B C
[1,] TRUE FALSE TRUE
[2,] FALSE TRUE TRUE
[3,] FALSE FALSE TRUE
[4,] FALSE TRUE TRUE
[5,] FALSE TRUE TRUE
[6,] TRUE TRUE TRUE
# The same 6 x 3 logical matrix, built with matrix() instead of structure()
m <- matrix(
  c(TRUE, FALSE, FALSE, FALSE, FALSE, TRUE,
    FALSE, TRUE, FALSE, TRUE, TRUE, TRUE,
    TRUE, TRUE, TRUE, TRUE, TRUE, TRUE),
  nrow = 6L, ncol = 3L,
  dimnames = list(NULL, c("A", "B", "C"))
)
How can we extract, for each row, the name of the first column with a TRUE value, efficiently? Of course, we could use apply
per row and then take min(which(...)).
Here is the desired output:
[1] A B C B B A
This thread might seem a duplicate of my question, but it's not:
We can use max.col
# max.col() treats the logical matrix as numeric; "first" breaks ties
# in favour of the lowest column index, i.e. the first TRUE per row.
first_true <- max.col(m, ties.method = "first")
colnames(m)[first_true]
#[1] "A" "B" "C" "B" "B" "A"
If there are no TRUE values in a row, we can change the result to NA
(if needed)
colnames(m)[max.col(m, "first")*NA^!rowSums(m)]
Or with ifelse
colnames(m)[ifelse(rowSums(m)==0, NA, max.col(m, "first"))]
Another approach, using which
to work directly with the logical
class of the matrix:
true_cells <- which(m, arr.ind = TRUE)  # (row, col) index of every TRUE
first_cols <- aggregate(col ~ row, data = true_cells, FUN = min)$col
colnames(m)[first_cols]
#[1] "A" "B" "C" "B" "B" "A"
We get the indices of the TRUE
values and then find the minimum (index of) column in which they occur, by row.
Benchmark:
library(microbenchmark)
# Benchmark data: 1000 x 500 all-FALSE matrix, then exactly one random
# TRUE is planted in each row (so every row has a first TRUE column).
n <- matrix(FALSE, nrow=1000, ncol=500) # couldn't afford a bigger one...
n <- t(apply(n, 1, function(rg) {rg[sample(1:500, 1, replace=TRUE)] <- TRUE ; rg}))
colnames(n) <- paste0("name", 1:500)
akrun <- function(n){colnames(n)[max.col(n, "first")]}
cath <- function(n){colnames(n)[aggregate(col~row, data=which(n, arr.ind = TRUE), FUN=min)$col]}
# Sanity check that both methods agree on the benchmark data, then time them.
all(akrun(n)==cath(n))
#[1] TRUE
microbenchmark(akrun(n), cath(n))
# expr min lq mean median uq max neval cld
#akrun(n) 6.985716 7.233116 8.231404 7.525513 8.842927 31.23469 100 a
# cath(n) 18.416079 18.811473 19.586298 19.272398 20.262169 22.42786 100 b
Here is my attempt. It isn't a one-liner, but it is lightning fast.
# Decode which(x)'s column-major linear indices by hand instead of using
# arr.ind = TRUE. Because which() scans column by column, the first time a
# row index appears is its lowest (first TRUE) column; keep those hits and
# restore row order at the end.
joe <- function(x) {
  hits <- which(x)
  n_row <- nrow(x)
  row_id <- ((hits - 1L) %% n_row) + 1L          # row of each TRUE
  col_name <- colnames(x)[((hits - 1L) %/% n_row) + 1L]  # column name of each TRUE
  keep <- which(!duplicated(row_id))             # first hit per row
  col_name[keep][order(row_id[keep])]
}
Here are the benchmarks using the data provided by @Cath:
# Timings in microseconds: joe() is roughly 8x faster than akrun() here.
microbenchmark(akrun(n), cath(n), joe(n))
Unit: microseconds
expr min lq mean median uq max neval
akrun(n) 4248.760 5588.8640 6148.1816 5926.7130 6378.887 12502.437 100
cath(n) 12641.189 13733.1415 14808.6524 14532.8115 15559.287 20628.037 100
joe(n) 555.418 642.2405 758.5293 713.2585 800.697 4849.334 100
# BUG in the original: all.equal(akrun(n), cath(n), joe(n)) passes joe(n)
# positionally as the `tolerance` argument, so joe's result was never
# actually compared. Compare the results pairwise instead:
all.equal(akrun(n), cath(n))
all.equal(akrun(n), joe(n))
# [1] TRUE
Here is another way that has better performance compared to @Cath's solutions:
# Like @Cath's aggregate() answer, but the index matrix is sorted by row
# before aggregating. (Fixed: use TRUE rather than the reassignable
# shorthand T.)
a <- which(m, arr.ind = TRUE)
colnames(m)[aggregate(col ~ row, a[order(a[, 1]), ], min)$col]
# [1] "A" "B" "C" "B" "B" "A"
Benchmarking given the matrix used by @Cath:
# First TRUE column name per row: which(..., arr.ind) + aggregate(min),
# with the index matrix pre-sorted by row.
# (Fixed: use TRUE rather than the reassignable shorthand T.)
m0h3n <- function(m) {
  a <- which(m, arr.ind = TRUE)
  colnames(m)[aggregate(col ~ row, a[order(a[, 1]), ], min)$col]
}
# BUG in the original: all.equal(akrun(n), cath(n), joe(n), m0h3n(n)) binds
# joe(n) and m0h3n(n) to the `tolerance` and `scale` arguments, so only the
# first two results were compared. Check each method against a reference:
all.equal(akrun(n), cath(n))
all.equal(akrun(n), joe(n))
all.equal(akrun(n), m0h3n(n))
# [1] TRUE
# Final benchmark of all four solutions: joe() wins by a wide margin.
microbenchmark(akrun(n), cath(n), joe(n), m0h3n(n))
# Unit: microseconds
# expr min lq mean median uq max neval
# akrun(n) 2291.981 2395.793 2871.7156 2482.7790 3561.9150 4205.370 100
# cath(n) 8263.210 8554.665 9695.9375 8782.8710 9947.9415 58239.983 100
# joe(n) 274.029 298.517 526.6722 312.0375 342.5355 2366.798 100
# m0h3n(n) 3890.178 3974.309 4280.6677 4073.1635 4227.7550 6337.501 100
Therefore, here are the solutions ranked in terms of efficiency:
If you love us? You can donate to us via Paypal or buy me a coffee so we can maintain and grow! Thank you!
Donate Us With