Answers:
Something like the following should result in each data frame as a separate element in a single list:
temp = list.files(pattern="*.csv")
myfiles = lapply(temp, read.delim)
This assumes that you have those CSVs in a single directory (your current working directory) and that all of them have the lowercase extension .csv.
Then, if you want to combine those data frames into a single data frame, see the solutions in other answers using things like do.call(rbind, ...), dplyr::bind_rows(), or data.table::rbindlist().
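For instance, a minimal sketch of that combining step, assuming the myfiles list produced above and CSVs that share the same columns:

big_df = do.call(rbind, myfiles)          # base R
# or, with the relevant package loaded:
# big_df = dplyr::bind_rows(myfiles)
# big_df = data.table::rbindlist(myfiles)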
If you really do want each data frame in a separate object, even though that's often inadvisable, you could do the following with assign:
temp = list.files(pattern="*.csv")
for (i in 1:length(temp)) assign(temp[i], read.csv(temp[i]))
Or, without assign, and to demonstrate (1) how the file names can be cleaned up and (2) how to use list2env, you can try the following:
temp = list.files(pattern="*.csv")
list2env(
  lapply(setNames(temp, make.names(gsub("*.csv$", "", temp))), 
         read.csv), envir = .GlobalEnv)
But again, it's often better to leave them in a single list.
For anyone using an updated version of this answer's assign approach inside a function: if you want the assigned values to reside in the global environment, make sure to set inherits = TRUE.
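For example, a minimal sketch of that point (the wrapper function here is hypothetical):

read_all_into_globalenv <- function(pattern = "*.csv") {
  for (f in list.files(pattern = pattern))
    # without inherits = TRUE, the objects would be created in the function's
    # local frame and vanish when it returns
    assign(f, read.csv(f), inherits = TRUE)
}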
A quick and succinct tidyverse solution (more than twice as fast as base R's read.csv):
library(tidyverse)  # for %>%, map_df() (purrr) and read_csv() (readr)

tbl <-
    list.files(pattern = "*.csv") %>% 
    map_df(~read_csv(.))
And data.table's fread() can even cut those loading times in half again (down to about 1/4 of the base R times):
library(data.table)
tbl_fread <- 
    list.files(pattern = "*.csv") %>% 
    map_df(~fread(.))
The stringsAsFactors = FALSE argument keeps the data frames free of factors (and, as marbel points out, it is the default setting for fread).
If the typecasting is being cheeky, you can force all the columns to be read as characters with the col_types argument.
tbl <-
    list.files(pattern = "*.csv") %>% 
    map_df(~read_csv(., col_types = cols(.default = "c")))
If you want to dip into subdirectories to construct your list of files to eventually bind, be sure to include the path name and register the files with their full names in your list. This will allow the binding work to go on outside of the current directory. (Think of the full pathnames as operating like passports that allow movement back across directory "borders".)
tbl <-
    list.files(path = "./subdirectory/",
               pattern = "*.csv", 
               full.names = T) %>% 
    map_df(~read_csv(., col_types = cols(.default = "c"))) 
As Hadley describes here (about halfway down the page):
map_df(x, f) is effectively the same as do.call("rbind", lapply(x, f))...
Bonus feature, per Niks's feature request in the comments below:
* Add the original filename to each record.
Code explained: make a function to append the filename to each record during the initial reading of the tables, then use that function instead of the plain read_csv() function.
read_plus <- function(flnm) {
    read_csv(flnm) %>% 
        mutate(filename = flnm)
}
tbl_with_sources <-
    list.files(pattern = "*.csv", 
               full.names = T) %>% 
    map_df(~read_plus(.))
(The typecasting and subdirectory handling approaches can also be handled inside the read_plus() function, in the same manner as illustrated in the second and third variants above.)
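For example, a sketch folding both into read_plus() (the subdirectory path is a placeholder):

read_plus <- function(flnm) {
    read_csv(flnm, col_types = cols(.default = "c")) %>% 
        mutate(filename = flnm)
}

tbl_with_sources <-
    list.files(path = "./subdirectory/",
               pattern = "*.csv", 
               full.names = T) %>% 
    map_df(~read_plus(.))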
### Benchmark Code & Results 
library(tidyverse)
library(data.table)
library(microbenchmark)
### Base R Approaches
#### Instead of a dataframe, this approach creates a list of lists
#### removed from analysis as this alone doubled analysis time reqd
# lapply_read.delim <- function(path, pattern = "*.csv") {
#     temp = list.files(path, pattern, full.names = TRUE)
#     myfiles = lapply(temp, read.delim)
# }
#### `read.csv()`
do.call_rbind_read.csv <- function(path, pattern = "*.csv") {
    files = list.files(path, pattern, full.names = TRUE)
    do.call(rbind, lapply(files, function(x) read.csv(x, stringsAsFactors = FALSE)))
}
map_df_read.csv <- function(path, pattern = "*.csv") {
    list.files(path, pattern, full.names = TRUE) %>% 
    map_df(~read.csv(., stringsAsFactors = FALSE))
}
### *dplyr()*
#### `read_csv()`
lapply_read_csv_bind_rows <- function(path, pattern = "*.csv") {
    files = list.files(path, pattern, full.names = TRUE)
    lapply(files, read_csv) %>% bind_rows()
}
map_df_read_csv <- function(path, pattern = "*.csv") {
    list.files(path, pattern, full.names = TRUE) %>% 
    map_df(~read_csv(., col_types = cols(.default = "c")))
}
### *data.table* / *purrr* hybrid
map_df_fread <- function(path, pattern = "*.csv") {
    list.files(path, pattern, full.names = TRUE) %>% 
    map_df(~fread(.))
}
### *data.table*
rbindlist_fread <- function(path, pattern = "*.csv") {
    files = list.files(path, pattern, full.names = TRUE)
    rbindlist(lapply(files, function(x) fread(x)))
}
do.call_rbind_fread <- function(path, pattern = "*.csv") {
    files = list.files(path, pattern, full.names = TRUE)
    do.call(rbind, lapply(files, function(x) fread(x, stringsAsFactors = FALSE)))
}
read_results <- function(dir_size){
    microbenchmark(
        # lapply_read.delim = lapply_read.delim(dir_size), # too slow to include in benchmarks
        do.call_rbind_read.csv = do.call_rbind_read.csv(dir_size),
        map_df_read.csv = map_df_read.csv(dir_size),
        lapply_read_csv_bind_rows = lapply_read_csv_bind_rows(dir_size),
        map_df_read_csv = map_df_read_csv(dir_size),
        rbindlist_fread = rbindlist_fread(dir_size),
        do.call_rbind_fread = do.call_rbind_fread(dir_size),
        map_df_fread = map_df_fread(dir_size),
        times = 10L) 
}
read_results_lrg_mid_mid <- read_results('./testFolder/500MB_12.5MB_40files')
print(read_results_lrg_mid_mid, digits = 3)
read_results_sml_mic_mny <- read_results('./testFolder/5MB_5KB_1000files/')
read_results_sml_tny_mod <- read_results('./testFolder/5MB_50KB_100files/')
read_results_sml_sml_few <- read_results('./testFolder/5MB_500KB_10files/')
read_results_med_sml_mny <- read_results('./testFolder/50MB_50KB_1000files')
read_results_med_sml_mod <- read_results('./testFolder/50MB_500KB_100files')
read_results_med_med_few <- read_results('./testFolder/50MB_5MB_10files')
read_results_lrg_sml_mny <- read_results('./testFolder/500MB_500KB_1000files')
read_results_lrg_med_mod <- read_results('./testFolder/500MB_5MB_100files')
read_results_lrg_lrg_few <- read_results('./testFolder/500MB_50MB_10files')
read_results_xlg_lrg_mod <- read_results('./testFolder/5000MB_50MB_100files')
print(read_results_sml_mic_mny, digits = 3)
print(read_results_sml_tny_mod, digits = 3)
print(read_results_sml_sml_few, digits = 3)
print(read_results_med_sml_mny, digits = 3)
print(read_results_med_sml_mod, digits = 3)
print(read_results_med_med_few, digits = 3)
print(read_results_lrg_sml_mny, digits = 3)
print(read_results_lrg_med_mod, digits = 3)
print(read_results_lrg_lrg_few, digits = 3)
print(read_results_xlg_lrg_mod, digits = 3)
# display boxplot of my typical use case results & basic machine max load
par(oma = c(0,0,0,0)) # remove overall margins if present
par(mfcol = c(1,1)) # remove grid if present
par(mar = c(12,5,1,1) + 0.1) # to display just a single boxplot with its complete labels
boxplot(read_results_lrg_mid_mid, las = 2, xlab = "", ylab = "Duration (seconds)", main = "40 files @ 12.5MB (500MB)")
boxplot(read_results_xlg_lrg_mod, las = 2, xlab = "", ylab = "Duration (seconds)", main = "100 files @ 50MB (5GB)")
# generate 3x3 grid boxplots
par(oma = c(12,1,1,1)) # margins for the whole 3 x 3 grid plot
par(mfcol = c(3,3)) # create grid (filling down each column)
par(mar = c(1,4,2,1)) # margins for the individual plots in 3 x 3 grid
boxplot(read_results_sml_mic_mny, las = 2, xlab = "", ylab = "Duration (seconds)", main = "1000 files @ 5KB (5MB)", xaxt = 'n')
boxplot(read_results_sml_tny_mod, las = 2, xlab = "", ylab = "Duration (milliseconds)", main = "100 files @ 50KB (5MB)", xaxt = 'n')
boxplot(read_results_sml_sml_few, las = 2, xlab = "", ylab = "Duration (milliseconds)", main = "10 files @ 500KB (5MB)")
boxplot(read_results_med_sml_mny, las = 2, xlab = "", ylab = "Duration (microseconds)        ", main = "1000 files @ 50KB (50MB)", xaxt = 'n')
boxplot(read_results_med_sml_mod, las = 2, xlab = "", ylab = "Duration (microseconds)", main = "100 files @ 500KB (50MB)", xaxt = 'n')
boxplot(read_results_med_med_few, las = 2, xlab = "", ylab = "Duration (seconds)", main = "10 files @ 5MB (50MB)")
boxplot(read_results_lrg_sml_mny, las = 2, xlab = "", ylab = "Duration (seconds)", main = "1000 files @ 500KB (500MB)", xaxt = 'n')
boxplot(read_results_lrg_med_mod, las = 2, xlab = "", ylab = "Duration (seconds)", main = "100 files @ 5MB (500MB)", xaxt = 'n')
boxplot(read_results_lrg_lrg_few, las = 2, xlab = "", ylab = "Duration (seconds)", main = "10 files @ 50MB (500MB)")
[Boxplot grids shown above] Rows: file counts (1000, 100, 10). Columns: final data frame size (5MB, 50MB, 500MB).

For the smallest use cases the base R results fare better: in those cases, the overhead of bringing the C libraries of purrr and dplyr to bear outweighs the performance gains observed when performing larger-scale processing tasks.
If you would like to run your own tests, this bash script may be helpful:
for ((i=1; i<=$2; i++)); do 
  cp "$1" "${1:0:8}_${i}.csv";
done
bash what_you_name_this_script.sh "fileName_you_want_copied" 100 will create 100 sequentially numbered copies of your file (numbered after the initial 8 characters of the filename and an underscore).
Special thanks to:

readAddFilename <- function(flnm) { read_csv(flnm) %>% mutate(filename = flnm) } and then drop that into map_df in place of the simple read-only read_csv() that is there now. I can update the entry above to show the function and how it fits into the pipe if you have remaining questions or think that would be helpful.

read_csv is much slower than fread. If you are going to say something is faster, you should include a benchmark. One idea is to create 30 files of 1GB each and read them; that is a case where performance matters.

fread() vs. dplyr's read_csv(): 14.2 vs. 19.9 seconds. To be honest, I had just been comparing base R with dplyr, and since read_csv() is around 2-4x faster than read.csv(), benchmarking did not seem necessary. It has, however, been interesting to give fread() a whirl and pause to check more complete benchmark results. Thanks again!
Here are some options to convert the .csv files into one data.frame using base R, plus some of the packages available for reading files in R.
This is slower than the options below.
# Get the files names
files = list.files(pattern="*.csv")
# First apply read.csv, then rbind
myfiles = do.call(rbind, lapply(files, function(x) read.csv(x, stringsAsFactors = FALSE)))
Edit: some additional options using data.table and readr.
A fread() version, a function of the data.table package. This is by far the fastest option in R.
library(data.table)
DT = do.call(rbind, lapply(files, fread))
# The same using `rbindlist`
DT = rbindlist(lapply(files, fread))
Using readr, another package for reading csv files. It's slower than fread but faster than base R, and it has different functionalities.
library(readr)
library(dplyr)
tbl = lapply(files, read_csv) %>% bind_rows()
The data.table version should improve performance.
As well as using lapply or some other looping construct in R, you could merge your CSV files into one file.
In Unix, if the files have no headers, it's as easy as:
cat *.csv > all.csv
Or, if there are headers and you can find a string that matches headers and only headers (e.g., suppose the header lines all start with "Age"), you could do:
cat *.csv | grep -v ^Age > all.csv
I think in Windows you could do this with COPY and SEARCH (or FIND or something) from the DOS command box, but why not install cygwin and get the power of the Unix command shell?
Or the Git Bash shell that comes with the Git install?
Here is the code I developed to read all the csv files into R. It will create a data frame for each csv file individually and title that data frame with the file's original name (removing the spaces and the .csv). I hope you find it useful!
path <- "C:/Users/cfees/My Box Files/Fitness/"
files <- list.files(path=path, pattern="*.csv")
for(file in files)
{
perpos <- which(strsplit(file, "")[[1]]==".")
assign(
gsub(" ","",substr(file, 1, perpos-1)), 
read.csv(paste(path,file,sep="")))
}
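A slightly more compact sketch of the same idea, using tools::file_path_sans_ext() instead of locating the period by hand:

path <- "C:/Users/cfees/My Box Files/Fitness/"
for (file in list.files(path = path, pattern = "*.csv")) {
  # strip the .csv extension and any spaces, then use the result as the object name
  assign(gsub(" ", "", tools::file_path_sans_ext(file)),
         read.csv(paste0(path, file)))
}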
The top three answers, by @A5C1D2H2I1M1N2O1R2T1, @leerssej, and @marbel, are all essentially the same: apply fread to each file, then rbind/rbindlist the resulting data.tables. I usually use the rbindlist(lapply(list.files("*.csv"), fread)) form.
This is better than other R-internal alternatives, and fine for a small number of large csvs, but not the best for a large number of small csvs when speed matters. In that case, it can be much faster to use cat first, as @Spacedman suggests in the 4th-ranked answer. I'll add some detail on how to do this from within R:
x = fread(cmd='cat *.csv', header=F)
But what if each csv has a header?
x = fread(cmd="awk 'NR==1||FNR!=1' *.csv", header=T)
And what if you have so many files that the *.csv shell glob fails?
x = fread(cmd='find . -name "*.csv" | xargs cat', header=F)
And what if all the files have a header AND there are too many files?
header = fread(cmd='find . -name "*.csv" | head -n1 | xargs head -n1', header=T)
x = fread(cmd='find . -name "*.csv" | xargs tail -q -n+2', header=F)
names(x) = names(header)
And what if the resulting concatenated csv is too big for system memory?
system('find . -name "*.csv" | xargs cat > combined.csv')
x = fread('combined.csv', header=F)
With headers?
system('find . -name "*.csv" | head -n1 | xargs head -n1 > combined.csv')
system('find . -name "*.csv" | xargs tail -q -n+2 >> combined.csv')
x = fread('combined.csv', header=T)
Finally, what if you don't want all the .csv files in a directory, but rather a specific set of files? (And they all have headers.) (This is my use case.)
fread(text=paste0(system("xargs cat|awk 'NR==1||$1!=\"<column one name>\"'",input=paths,intern=T),collapse="\n"),header=T,sep="\t")
This is about the same speed as plain fread xargs cat :)
Note: for data.table versions before v1.11.6 (19 Sep 2018), omit the cmd= from fread(cmd=...).
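For example, the pre-v1.11.6 form of the first command above would be:

x = fread('cat *.csv', header=F)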
Addendum: using mclapply from the parallel package in place of serial lapply, e.g., rbindlist(mclapply(list.files("*.csv"), fread)), is also much faster than rbindlist lapply fread.
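A minimal sketch of that swap (mclapply forks, so this is for Unix-alikes; the mc.cores value is an assumption):

library(parallel)
library(data.table)
x = rbindlist(mclapply(list.files(pattern = "*.csv"), fread, mc.cores = detectCores()))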
Time to read 121401 csvs into a single data.table. Each csv has 3 columns, one header row, and, on average, 4.510 rows. The machine is a GCP VM with 96 cores:
rbindlist lapply fread   234.172s 247.513s 256.349s
rbindlist mclapply fread  15.223s   9.558s   9.292s
fread xargs cat            4.761s   4.259s   5.095s
In summary, if you are interested in speed, and have many files and many cores, fread xargs cat is about 50x faster than the fastest solution in the top 3 answers.
In my view, most of the other answers are obsoleted by rio::import_list, which is a succinct one-liner:
library(rio)
my_data <- import_list(dir("path_to_directory", pattern = ".csv"), rbind = TRUE)
Any additional arguments are passed through to rio::import. rio can deal with almost any file format R can read, and it uses data.table's fread where possible, so it should be fast too.
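A hedged usage sketch of that pass-through, asking import_list for tibbles (setclass = "tbl" is just an illustration, not required):

library(rio)
my_data <- import_list(dir("path_to_directory", pattern = ".csv"),
                       rbind = TRUE, setclass = "tbl")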
Using plyr::ldply, there is roughly a 50% speed increase by enabling the .parallel option while reading 400 csv files of roughly 30-40 MB each. The example includes a text progress bar.
library(plyr)
library(data.table)
library(doSNOW)
csv.list <- list.files(path="t:/data", pattern=".csv$", full.names=TRUE)
cl <- makeCluster(4)
registerDoSNOW(cl)
pb <- txtProgressBar(max=length(csv.list), style=3)
pbu <- function(i) setTxtProgressBar(pb, i)
dt <- setDT(ldply(csv.list, fread, .parallel=TRUE, .paropts=list(.options.snow=list(progress=pbu))))
stopCluster(cl)
Is there a way to pass extra arguments to fread or to user-defined functions? Thanks!
?ldply shows that its ... arguments are passed on to .fun, so either fread, skip = 100 or function(x) fread(x, skip = 100) would work.
function(x) fread(x, skip = 100) didn't work for me, but providing additional args after the bare function name did the trick. Thanks again!
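Adapting the example above, a sketch passing an extra argument through ldply to fread (the skip value is arbitrary):

dt <- setDT(ldply(csv.list, fread, skip = 100, .parallel = TRUE,
                  .paropts = list(.options.snow = list(progress = pbu))))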
Based on dnlbrk's comment, assign can be considerably faster than list2env for big files.
library(readr)
library(stringr)
List_of_file_paths <- list.files(path ="C:/Users/Anon/Documents/Folder_with_csv_files/", pattern = ".csv", all.files = TRUE, full.names = TRUE)
By setting the full.names argument to TRUE, you will get the full path to each file as a separate character string in your list of files; e.g., List_of_file_paths[1] will be something like "C:/Users/Anon/Documents/Folder_with_csv_files/file1.csv".
for (f in 1:length(List_of_file_paths)) {
  # start = 46 skips the folder prefix above; end = -5 drops ".csv"
  file_name <- str_sub(string = List_of_file_paths[f], start = 46, end = -5)
  file_df <- read_csv(List_of_file_paths[f])
  assign(x = file_name, value = file_df, envir = .GlobalEnv)
}
You can use the data.table package's fread, or base R's read.csv, instead of read_csv. The file_name step lets you neaten the name so that each data frame is not left with the file's full path as its name. You could extend your loop to do further things to each data frame before transferring it to the global environment, for example:
for (f in 1:length(List_of_file_paths)) {
  file_name <- str_sub(string = List_of_file_paths[f], start = 46, end = -5)
  file_df <- read_csv(List_of_file_paths[f])
  file_df <- file_df[, 1:3]  # if you only need the first three columns
  assign(x = file_name, value = file_df, envir = .GlobalEnv)
}
Provided you have many cores on your machine, the following code should give you the fastest speed for big data:
if (!require("pacman")) install.packages("pacman")
pacman::p_load(doParallel, data.table, stringr)
# get the file name
dir() %>% str_subset("\\.csv$") -> fn
# use parallel setting
(cl <- detectCores() %>%
  makeCluster()) %>%
  registerDoParallel()
# read and bind all files together
system.time({
  big_df <- foreach(
    i = fn,
    .packages = "data.table"
  ) %dopar%
    {
      fread(i, colClasses = "character")
    } %>%
    rbindlist(fill = TRUE)
})
# end of parallel work
stopCluster(cl)
Update on 2020/04/16: having found a new package available for parallel computing, an alternative solution is provided with the following code.
if (!require("pacman")) install.packages("pacman")
pacman::p_load(future.apply, data.table, stringr)
# get the file name
dir() %>% str_subset("\\.csv$") -> fn
plan(multiprocess)  # in newer versions of the future package, use plan(multisession)
future_lapply(fn,fread,colClasses = "character") %>% 
  rbindlist(fill = TRUE) -> res
# res is the merged data.table
I like the approach using list.files(), lapply() and list2env() (or fs::dir_ls(), purrr::map() and list2env()). That seems simple and flexible.
Alternatively, you may try the small package {tor} (to-R): by default, it imports files from the working directory into a list (list_*() variants) or into the global environment (load_*() variants).
For example, here I read all the .csv files from my working directory into a list using tor::list_csv():
library(tor)
dir()
#>  [1] "_pkgdown.yml"     "cran-comments.md" "csv1.csv"        
#>  [4] "csv2.csv"         "datasets"         "DESCRIPTION"     
#>  [7] "docs"             "inst"             "LICENSE.md"      
#> [10] "man"              "NAMESPACE"        "NEWS.md"         
#> [13] "R"                "README.md"        "README.Rmd"      
#> [16] "tests"            "tmp.R"            "tor.Rproj"
list_csv()
#> $csv1
#>   x
#> 1 1
#> 2 2
#> 
#> $csv2
#>   y
#> 1 a
#> 2 b
And now I load those files into my global environment with tor::load_csv():
# The working directory contains .csv files
dir()
#>  [1] "_pkgdown.yml"     "cran-comments.md" "CRAN-RELEASE"    
#>  [4] "csv1.csv"         "csv2.csv"         "datasets"        
#>  [7] "DESCRIPTION"      "docs"             "inst"            
#> [10] "LICENSE.md"       "man"              "NAMESPACE"       
#> [13] "NEWS.md"          "R"                "README.md"       
#> [16] "README.Rmd"       "tests"            "tmp.R"           
#> [19] "tor.Rproj"
load_csv()
# Each file is now available as a dataframe in the global environment
csv1
#>   x
#> 1 1
#> 2 2
csv2
#>   y
#> 1 a
#> 2 b
If you need to read specific files, you can match their file path with regexp, ignore.case and invert.
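A minimal sketch of that filtering, using the working directory shown above:

list_csv(regexp = "csv1")                 # only files whose names match "csv1"
list_csv(regexp = "csv1", invert = TRUE)  # everything except those matches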
For even more flexibility, use list_any(). It allows you to supply the reader function via the argument .f.
(path_csv <- tor_example("csv"))
#> [1] "C:/Users/LeporeM/Documents/R/R-3.5.2/library/tor/extdata/csv"
dir(path_csv)
#> [1] "file1.csv" "file2.csv"
list_any(path_csv, read.csv)
#> $file1
#>   x
#> 1 1
#> 2 2
#> 
#> $file2
#>   y
#> 1 a
#> 2 b
Pass additional arguments via ... or inside the lambda function.
path_csv %>% 
  list_any(readr::read_csv, skip = 1)
#> Parsed with column specification:
#> cols(
#>   `1` = col_double()
#> )
#> Parsed with column specification:
#> cols(
#>   a = col_character()
#> )
#> $file1
#> # A tibble: 1 x 1
#>     `1`
#>   <dbl>
#> 1     2
#> 
#> $file2
#> # A tibble: 1 x 1
#>   a    
#>   <chr>
#> 1 b
path_csv %>% 
  list_any(~read.csv(., stringsAsFactors = FALSE)) %>% 
  map(as_tibble)
#> $file1
#> # A tibble: 2 x 1
#>       x
#>   <int>
#> 1     1
#> 2     2
#> 
#> $file2
#> # A tibble: 2 x 1
#>   y    
#>   <chr>
#> 1 a    
#> 2 b
Someone asked me to add this functionality to my stackoverflow R package. Given that it is a tinyverse package (i.e., it can't depend on third-party packages), here is what I came up with:
#' Bulk import data files 
#' 
#' Read in each file at a path and then unnest them. Defaults to csv format.
#' 
#' @param path        a character vector of full path names
#' @param pattern     an optional \link[=regex]{regular expression}. Only file names which match the regular expression will be returned.
#' @param reader      a function that can read data from a file name.
#' @param ...         optional arguments to pass to the reader function (eg \code{stringsAsFactors}).
#' @param reducer     a function to unnest the individual data files. Use I to retain the nested structure. 
#' @param recursive     logical. Should the listing recurse into directories?
#'  
#' @author Neal Fultz
#' @references \url{https://stackoverflow.com/questions/11433432/how-to-import-multiple-csv-files-at-once}
#' 
#' @importFrom utils read.csv
#' @export
read.directory <- function(path='.', pattern=NULL, reader=read.csv, ..., 
                           reducer=function(dfs) do.call(rbind.data.frame, dfs), recursive=FALSE) {
  files <- list.files(path, pattern, full.names = TRUE, recursive = recursive)
  reducer(lapply(files, reader, ...))
}
By parameterizing the reader and reducer functions, people can choose to use data.table or dplyr, or simply use the base R functions, which are fine for smaller data sets.
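Hedged usage sketches (the path is a placeholder):

# keep the nested list structure instead of row-binding
dfs <- read.directory("path_to_directory", pattern = "\\.csv$", reducer = I)

# or plug in data.table for speed
dt <- read.directory("path_to_directory",
                     reader = data.table::fread,
                     reducer = data.table::rbindlist)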