Logo Questions Linux Laravel Mysql Ubuntu Git Menu
 

How to efficiently split a big data.table into two according to a rule that involves 2 columns?

What is the most efficient (time and space alike) way to split the following table

# Example input: some (x, y) rows have a mirrored (y, x) row, some do not
dt <- data.table(x = c(1, 3, 5, 4, 6, 2), y = c(4, 7, 1, 1, 2, 6))
> dt
   x y
1: 1 4
2: 3 7
3: 5 1
4: 4 1
5: 6 2
6: 2 6

into two separate tables, dt1 and dt2, such that dt1 contains every (x,y) row whose mirrored pair (y,x) is also a row in dt, while dt2 contains the remaining rows:

> dt1
   x y
1: 1 4
2: 4 1
3: 6 2
4: 2 6

> dt2
   x y
1: 3 7
2: 5 1

Efficiency is critical; the full table has nearly 200M rows.

like image 970
Amitai Avatar asked Dec 19 '22 18:12

Amitai


2 Answers

Another option is to perform a reversed self-join — joining the table to itself with the x and y columns swapped.

# Reversed self-join: match each row's (x, y) against (y, x) via the swapped
# `on` spec, so a row of dt is found iff its mirrored pair exists in dt.
# `which = TRUE` returns matching row indices instead of joined rows,
# `nomatch = 0L` drops rows without a mirror, and sort.int() restores the
# original row order. unique() avoids duplicate i-rows producing repeated hits.
indx <- sort.int(dt[unique(dt), on = c(x = "y", y = "x"), which = TRUE, nomatch = 0L])

# Rows whose mirrored (y, x) pair is also present
dt[indx]
#    x y
# 1: 1 4
# 2: 4 1
# 3: 6 2
# 4: 2 6

# The remaining rows (negative indexing excludes the matched ones)
dt[-indx]
#    x y
# 1: 3 7
# 2: 5 1

Benchmark — if you don't care about the order, my solution seems to be faster for 200M rows (both solutions produce their indices unordered).

# Simulated 200M-row table; values drawn from 1..1000, so mirrored pairs
# are extremely common at this density.
set.seed(123)
bigdt <- data.table(x = sample(1e3, 2e8, replace = TRUE),
                    y = sample(1e3, 2e8, replace = TRUE))

# Grouping approach: group by the unordered pair (pmax, pmin) and keep the
# row indices of groups with more than one member.
system.time(i1 <- bigdt[, .I[.N>1] ,.(X=pmax(x,y), Y=pmin(y,x))]$V1)
# user  system elapsed 
# 21.81    0.82   22.97 

# Reversed self-join approach (without the final sort.int, so the result is
# unordered, matching i1's ordering guarantees).
# NOTE(review): the two approaches can differ in general — e.g. a row
# duplicated without its mirror, or a single x == y row — but at this data
# density they agree, as the identical() check below confirms.
system.time(indx <- bigdt[unique(bigdt), on = c(x = "y", y = "x"), which = TRUE, nomatch = 0L])
#  user  system elapsed 
# 17.74    0.90   18.80 

# Checking if both unsorted and if identical when sorted
is.unsorted(i1)
# [1] TRUE
is.unsorted(indx)
# [1] TRUE

identical(sort.int(i1), sort.int(indx))
# [1] TRUE
# [1] TRUE

And here is a nondegenerate case — one where indx is not simply every row index bigdt[, .I], i.e. not all rows have a mirrored pair:

# Denser test case: 1e7 rows over 1e4 distinct values, so only some rows
# have a mirrored pair (unlike the benchmark above, where virtually every
# row matched).
set.seed(123)
n  = 1e7
nv = 1e4
DT <- data.table(x = sample(nv, n, replace = TRUE), y = sample(nv, n, replace = TRUE))

library(microbenchmark)
# NOTE(review): times = 1 means one run per expression — the timings below
# carry no variance information.
microbenchmark(
  akrun  = {
    # Group by the unordered pair; keep row indices of multi-member groups
    idx = DT[, .I[.N > 1], by=.(pmax(x,y), pmin(x,y))]$V1
    list(DT[idx], DT[-idx])
  },
  akrun2 = {
    # Canonical "min max" string key per row; flag keys duplicated in
    # either scan direction
    idx = DT[,{
      x1 <- paste(pmin(x,y), pmax(x,y))
      duplicated(x1)|duplicated(x1, fromLast=TRUE)
    }]
    list(DT[idx], DT[!idx])
  },
  davida = {
    # Reversed self-join: indices of rows whose (y, x) mirror exists
    idx = DT[unique(DT), on = c(x = "y", y = "x"), which = TRUE, nomatch = 0L]
    list(DT[idx], DT[-idx])
  },
  akrun3 = {
    # NOTE(review): `:=` adds column N to DT by reference — a side effect
    # inside the benchmarked expression — then removes it again; this also
    # overwrites the global `n` defined above.
    n = DT[, N := .N, by = .(pmax(x,y), pmin(x,y))]$N
    DT[, N := NULL]
    split(DT, n > 1L)
  }, times = 1)

Unit: seconds
   expr       min        lq      mean    median        uq       max neval
  akrun  7.056609  7.056609  7.056609  7.056609  7.056609  7.056609     1
 akrun2 22.810844 22.810844 22.810844 22.810844 22.810844 22.810844     1
 davida  2.738918  2.738918  2.738918  2.738918  2.738918  2.738918     1
 akrun3  5.662700  5.662700  5.662700  5.662700  5.662700  5.662700     1
like image 130
David Arenburg Avatar answered May 23 '23 09:05

David Arenburg


We could try

# Group rows by the unordered pair (X = larger value, Y = smaller value);
# .I[.N>1] collects the original row indices of every group with more than
# one member — for this data, a pair together with its mirrored counterpart.
i1 <- dt[, .I[.N>1] ,.(X=pmax(x,y), Y=pmin(y,x))]$V1
# Rows whose mirrored pair is present
dt[i1]
#   x y
#1: 1 4
#2: 4 1
#3: 6 2
#4: 2 6

# The remaining rows
dt[-i1]
#   x y
#1: 3 7
#2: 5 1

Or, another option using duplicated():

 # Build a canonical "min max" string key per row; a row is flagged TRUE
 # when its key occurs more than once (scanning forward or backward), i.e.
 # its mirrored pair — or an exact duplicate — is also present.
 i1 <-  dt[,{x1 <- paste(pmin(x,y), pmax(x,y))
         duplicated(x1)|duplicated(x1, fromLast=TRUE) }]
 dt[i1]   # rows with a mirrored/duplicate counterpart
 dt[!i1]  # the remaining rows
like image 43
akrun Avatar answered May 23 '23 09:05

akrun