1 Installation

  1. Install the package from Bioconductor.
if (!requireNamespace("BiocManager", quietly = TRUE))
    install.packages("BiocManager")
BiocManager::install("RcwlPipelines")

The development version is also available for download from GitHub.

BiocManager::install("hubentu/RcwlPipelines")
  2. Load the package into the R session.
library(RcwlPipelines)
library(dplyr)

2 Tools and pipelines scripts

The R scripts to build the CWL tools and pipelines are now collected in a GitHub repository (https://github.com/hubentu/RcwlRecipes), which is a community effort to collect bioinformatics tools and pipelines built with Rcwl and CWL (Common Workflow Language).

Three functions are used to collect the Rcwl scripts, search tool recipes by keywords, and load the scripts into the current R environment.

2.1 Indexing recipe scripts

The cwlUpdate function updates the recipe scripts from the GitHub repository and collects their metadata into a local cache using the BiocFileCache package. By default, the local cache is created under your home directory the first time the function is called. Here we use a temporary directory as an example.

tools <- cwlUpdate(cachePath = tempfile())
#> Update scripts...
tools
#> class: BiocFileCache
#> bfccache: /tmp/RtmpqutAFt/file787141e04582
#> bfccount: 129
#> For more information see: bfcinfo() or bfcquery()
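
As the cache summary suggests, the full list of cached recipes and their local script paths can be inspected with bfcinfo from BiocFileCache. A minimal sketch (the column selection is only illustrative):

library(BiocFileCache)
## list all cached recipe scripts with their local script paths
bfcinfo(tools) %>% select(rname, rpath)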

2.2 Search by keyword

The function cwlSearch helps to search the indexed recipes by keywords. For example, here we try to find the alignment tool bwa mem.

tl <- cwlSearch(c("bwa", "mem"), tools)
data.frame(tl)
#>     rid  rname         create_time         access_time
#> 1 BFC81 tl_bwa 2020-08-11 01:36:48 2020-08-11 01:36:48
#>                                                               rpath rtype
#> 1 /tmp/RtmpqutAFt/file787141e04582/RcwlRecipes-master/Rcwl/tl_bwa.R local
#>                                                               fpath
#> 1 /tmp/RtmpqutAFt/file787141e04582/RcwlRecipes-master/Rcwl/tl_bwa.R
#>   last_modified_time etag expires Type Command
#> 1                 NA <NA>      NA tool bwa mem
#>                             Container
#> 1 biocontainers/bwa:v0.7.17-3-deb_cv1

2.3 Loading tools and pipelines

The function cwlInstall can be used to “install” tools or pipelines to the current environment, given the script path.

cwlInstall(tl$rpath)
bwa
#> class: cwlParam 
#>  cwlClass: CommandLineTool 
#>  cwlVersion: v1.0 
#>  baseCommand: bwa mem 
#> requirements:
#> - class: DockerRequirement
#>   dockerPull: biocontainers/bwa:v0.7.17-3-deb_cv1
#> inputs:
#>   threads (int): -t 
#>   RG (string): -R 
#>   Ref (File):  
#>   FQ1 (File):  
#>   FQ2 (File?):  
#> outputs:
#> sam:
#>   type: File
#>   outputBinding:
#>     glob: '*.sam'
#> stdout: bwaOutput.sam

Or we can install the tool by its rname directly.

cwlInstall(rname = 'tl_bwa', bfc = tools)

That’s it! The tool “bwa” is ready to use.
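
As a quick sketch of how the tool could then be used (not evaluated here), input values can be assigned with the $ accessor and the tool executed with runCWL from Rcwl. All file paths and the read group string below are placeholders.

## placeholder inputs for the bwa tool
bwa$threads <- 2
bwa$RG <- "@RG\tID:sample1\tSM:sample1"
bwa$Ref <- "hg38.fa"          ## reference fasta (bwa index files must sit alongside)
bwa$FQ1 <- "sample1_R1.fq.gz"
bwa$FQ2 <- "sample1_R2.fq.gz"
## run with cwltool inside the docker container (not evaluated here)
## res <- runCWL(bwa, outdir = tempdir())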

3 Build a pipeline

We can develop a pipeline by utilizing the available tools. For example, a simple alignment pipeline with mapping and duplicate marking can be built from these tools.

First, we check whether the required tools (bwa, samtools and picard markduplicates) are available.

tls <- cwlSearch("bwa|sam2bam|sortBam|samtools_index|markdup", tools) %>%
    filter(Type == "tool") %>%
    select(rname, rpath, Command, Container)
tls
#> # A tibble: 6 x 4
#>   rname      rpath                             Command      Container           
#>   <chr>      <chr>                             <chr>        <chr>               
#> 1 tl_bwa     /tmp/RtmpqutAFt/file787141e04582… bwa mem      biocontainers/bwa:v…
#> 2 tl_bwa_in… /tmp/RtmpqutAFt/file787141e04582… bwa index    biocontainers/bwa:v…
#> 3 tl_markdup /tmp/RtmpqutAFt/file787141e04582… picard Mark… quay.io/biocontaine…
#> 4 tl_sam2bam /tmp/RtmpqutAFt/file787141e04582… samtools vi… biocontainers/samto…
#> 5 tl_samtoo… /tmp/RtmpqutAFt/file787141e04582… samtools in… biocontainers/samto…
#> 6 tl_sortBam /tmp/RtmpqutAFt/file787141e04582… samtools so… biocontainers/samto…

Then we load all the tools.

invisible(sapply(tls$rpath, cwlInstall))

Next, we define the input parameters.

p1 <- InputParam(id = "threads", type = "int")
p2 <- InputParam(id = "RG", type = "string")
p3 <- InputParam(id = "Ref", type = "string")
p4 <- InputParam(id = "FQ1", type = "File")
p5 <- InputParam(id = "FQ2", type = "File?")

Then we define the pipeline steps, from raw fastqs to duplicate-marked alignments.

## bwa
s1 <- Step(id = "bwa", run = bwa,
           In = list(threads = "threads",
                     RG = "RG",
                     Ref = "Ref",
                     FQ1 = "FQ1",
                     FQ2 = "FQ2"))
## sam to bam
s2 <- Step(id = "sam2bam", run = sam2bam,
           In = list(sam = "bwa/sam"))
## sort bam
s3 <- Step(id = "sortBam", run = sortBam,
           In = list(bam = "sam2bam/bam"))
## mark duplicates
s4 <- Step(id = "markdup", run = markdup,
           In = list(ibam = "sortBam/sbam",
                     obam = list(
                         valueFrom="$(inputs.ibam.nameroot).mdup.bam"),
                     matrix = list(
                         valueFrom="$(inputs.ibam.nameroot).markdup.txt")))
## index bam
s5 <- Step(id = "idxBam", run = samtools_index,
           In = list(bam = "markdup/mBam"))

Last, we define the outputs and connect the steps to a new pipeline.

req1 <- list(class = "StepInputExpressionRequirement")
req2 <- list(class = "InlineJavascriptRequirement")
## outputs
o1 <- OutputParam(id = "Bam", type = "File", outputSource = "markdup/mBam")
o2 <- OutputParam(id = "Idx", type = "File", outputSource = "idxBam/idx")
## stepParam
Align <- cwlStepParam(requirements = list(req1, req2),
                      inputs = InputParamList(p1, p2, p3, p4, p5),
                      outputs = OutputParamList(o1, o2))
## build pipeline
Align <- Align + s1 + s2 + s3 + s4 + s5

The pipeline is ready for use. We can plot the pipeline with plotCWL from the Rcwl package.

plotCWL(Align)
#> Warning: The `x` argument of `as_tibble.matrix()` must have unique column names if `.name_repair` is omitted as of tibble 2.0.0.
#> Using compatibility `.name_repair`.
#> This warning is displayed once every 8 hours.
#> Call `lifecycle::last_warnings()` to see where this warning was generated.
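
Besides plotting, the assembled pipeline can be exported to standard CWL files, for example to run it outside of R with cwltool. A minimal sketch using writeCWL from Rcwl (the output prefix is arbitrary):

## write Align.cwl and Align.yml to a temporary directory
writeCWL(Align, prefix = file.path(tempdir(), "Align"))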

4 Pipelines summary

There are mainly 4 pipelines collected in this package. Here is a brief introduction to these pipelines. More pipelines and tools are expected to be included in the future.

4.1 DNASeq alignment pipeline

The pipeline can be used to preprocess DNA sequencing reads in fastq format. It can take paired fastqs and read groups from multiple batches as input.

cwlInstall(rname = "pl_alignMerge", bfc = tools)
inputs(alignMerge)
#> inputs:
#>   idBam (string):  
#>   RG (string[]):  
#>   threads (int):  
#>   Ref (File):  
#>   FQ1s (File[]):  
#>   FQ2s (File[]):

The pipeline includes two steps, and several jobs will be run in each step.

  1. bwaAlign: bwa alignment by read groups.
runs(runs(alignMerge)[[1]])
#> List of length 4
#> names(4): bwa sam2bam sortBam idxBam
  • bwa: To align fastqs and read groups to the reference genome with bwa.
  • sam2bam: To convert the alignments in “sam” format to “bam” format with samtools.
  • sortBam: To sort the “bam” file by coordinates with samtools.
  • idxBam: To index the “bam” file with samtools.
  2. mergeBamDup: Merge by sample and mark duplicates.
runs(runs(alignMerge)[[2]])
#> List of length 4
#> names(4): mergeBam markdup samtools_index samtools_flagstat
  • mergeBam: To merge bam files from multiple batches with picard.
  • markdup: To mark duplicates with picard.
  • samtools_index: To index the bam file with samtools.
  • samtools_flagstat: To summarize the alignment flags in the bam file with samtools.

The final duplicate-marked bam file, its index, the duplicate metrics, and the flag statistics summary will be in the output folder.

outputs(alignMerge)
#> outputs:
#> oBam:
#>   type: File
#>   outputSource: mergeBamDup/oBam
#> matrix:
#>   type: File
#>   outputSource: mergeBamDup/matrix
#> Idx:
#>   type: File
#>   outputSource: mergeBamDup/Idx
#> stat:
#>   type: File
#>   outputSource: mergeBamDup/stat

Here you can find an example of how to run the pipeline.

https://hubentu.github.io/others/Rcwl/application.html#dnaseq-alignment-pipeline
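
For orientation, here is a hypothetical sketch of filling in the inputs listed above and running alignMerge with runCWL from Rcwl; sample names, read groups, and file paths are placeholders.

## placeholder inputs matching inputs(alignMerge)
alignMerge$idBam <- "sample1"
alignMerge$RG <- list("@RG\tID:L1\tSM:sample1", "@RG\tID:L2\tSM:sample1")
alignMerge$threads <- 4
alignMerge$Ref <- "hg38.fa"
alignMerge$FQ1s <- list("sample1_L1_R1.fq.gz", "sample1_L2_R1.fq.gz")
alignMerge$FQ2s <- list("sample1_L1_R2.fq.gz", "sample1_L2_R2.fq.gz")
## res <- runCWL(alignMerge, outdir = "output/sample1")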

4.2 RNASeq pipeline

The pipeline was built with read quality summary, STAR alignment, quantification by featureCounts, and RSeQC quality control. Here are the inputs.

cwlInstall(rname = "pl_rnaseq_Sf", bfc = tools)
inputs(rnaseq_Sf)
#> inputs:
#>   in_seqfiles (File[]):  
#>   in_prefix (string):  
#>   in_genomeDir (Directory):  
#>   in_GTFfile (File):  
#>   in_runThreadN (int):  1

The pipeline includes 6 steps.

  • fastqc: To run quality summary for raw fastqs with fastqc.
  • STAR: To align fastqs with STAR.
  • samtools_index: To index the aligned bam file.
  • samtools_flagstat: To summarize alignment flags.
  • featureCounts: To quantify gene abundances.
  • RSeQC: Several sub-steps are included.
    • gtfToGenePred: To convert the GTF annotation to “genePred” format.
    • genePredToBed: To convert the “genePred” annotation to “bed” format.
    • r_distribution: To compute the read distribution over genome features.
    • gCoverage: To summarize read coverage over gene bodies.

The outputs and logs from the alignment, quantification, and QC steps are collected together into the output folder. A final QC report can be generated by multiqc, which is also available in the data package.

An example to run the pipeline.

https://hubentu.github.io/others/Rcwl/application.html#rnaseq-pipeline
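
Similarly, a hypothetical sketch of assigning the inputs listed above before running the pipeline; the annotation file and the pre-built STAR genome index directory are placeholders.

## placeholder inputs matching inputs(rnaseq_Sf)
rnaseq_Sf$in_seqfiles <- list("sample1_R1.fq.gz", "sample1_R2.fq.gz")
rnaseq_Sf$in_prefix <- "sample1"
rnaseq_Sf$in_genomeDir <- "STAR_index"                 ## pre-built STAR index directory
rnaseq_Sf$in_GTFfile <- "gencode.v32.annotation.gtf"
rnaseq_Sf$in_runThreadN <- 4
## res <- runCWL(rnaseq_Sf, outdir = "output/sample1")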

4.3 GATK4 germline variant calling pipeline

The GATK4 best practice pipeline for germline variant calling was implemented with the Workflow Description Language (WDL). We wrapped the WDL pipeline into 3 steps with Rcwl. The details of the pipeline can be found here: https://software.broadinstitute.org/gatk/best-practices/workflow?id=11145

  1. GAlign: GATK alignment.

The fastqs, sample information, and customized json files for WDL are required as inputs. Multiple jobs will run in this step, including bwa alignment, duplicate marking, and base quality recalibration. GATK-ready BAM files will be collected in the output directory.

  2. hapCall: HaplotypeCaller.

The GATK-ready BAM and customized json files are the inputs of this step. The local paths of the GATK bundle files need to be modified in your json file. A “gVCF” file will be generated.

  3. jdCall: Joint variant discovery.

This step will combine the “gVCF” files and then call germline variants in all samples. The paths of the local bundle files also need to be changed in the json template file. The final VCF file of germline variants will be collected.

An example to run the pipeline.
https://hubentu.github.io/others/Rcwl/application.html#gatk4-germline-variant-calling-pipeline
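
The recipes for these wrapped steps can be located in the local cache by keyword, for example with cwlSearch; the keywords below are taken from the step names above and may need adjusting to the exact recipe names.

## look up the three GATK4 germline steps in the recipe cache
cwlSearch("GAlign|hapCall|jdCall", tools) %>%
    select(rname, Type, Command)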

4.4 GATK4 Somatic short variant pipeline

The GATK4 Mutect2 pipeline for somatic short variant calling is also available in WDL. The pipeline was reimplemented with Rcwl based on the best practice documents: https://software.broadinstitute.org/gatk/best-practices/workflow?id=11146

cwlInstall(rname = "pl_GPoN", bfc = tools)
cwlInstall(rname = "pl_Mutect2PL", bfc = tools)
  1. Variant calling on normal samples

First, we need to run Mutect2 in tumor-only mode for each normal sample using the tool Mutect2. The argument “--max-mnp-distance 0” needs to be added because the next step, “GenomicsDBImport”, can’t handle MNPs.

arguments(Mutect2) <- list("--max-mnp-distance", "0")
Mutect2
#> class: cwlParam 
#>  cwlClass: CommandLineTool 
#>  cwlVersion: v1.0 
#>  baseCommand: gatk Mutect2 
#> requirements:
#> - class: DockerRequirement
#>   dockerPull: broadinstitute/gatk:4.1.3.0
#> arguments: --max-mnp-distance 0 
#> inputs:
#>   tbam (File): -I 
#>   nbam (File?): -I 
#>   Ref (File): -R 
#>   normal (string?): -normal 
#>   germline (File?): --germline-resource 
#>   pon (File?): --panel-of-normals 
#>   interval (File?): -L 
#>   out (string): -O 
#> outputs:
#> vout:
#>   type: File
#>   secondaryFiles:
#>   - .idx
#>   - .stats
#>   outputBinding:
#>     glob: $(inputs.out)
  2. Panel of normals

This step creates a GenomicsDB from all the normal Mutect2 calls and then combines them into a panel-of-normals VCF. A CWL pipeline, GPoN, was built to create the panel VCF.

runs(GPoN)
#> List of length 2
#> names(2): GenomicsDB PoN
  3. Mutect2 and variant filtering

This pipeline includes two main steps. First we call a large set of candidate somatic variants, then filter them by estimated contamination and orientation bias artifacts. We can plot the Mutect2PL pipeline to show the details.

plotCWL(Mutect2PL)
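
As with the other pipelines, the steps and required inputs of Mutect2PL can be inspected with runs and inputs (output not shown here).

runs(Mutect2PL)
inputs(Mutect2PL)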

5 SessionInfo

sessionInfo()
#> R version 4.0.2 (2020-06-22)
#> Platform: x86_64-pc-linux-gnu (64-bit)
#> Running under: Ubuntu 18.04.4 LTS
#> 
#> Matrix products: default
#> BLAS:   /home/biocbuild/bbs-3.11-bioc/R/lib/libRblas.so
#> LAPACK: /home/biocbuild/bbs-3.11-bioc/R/lib/libRlapack.so
#> 
#> locale:
#>  [1] LC_CTYPE=en_US.UTF-8       LC_NUMERIC=C              
#>  [3] LC_TIME=en_US.UTF-8        LC_COLLATE=C              
#>  [5] LC_MONETARY=en_US.UTF-8    LC_MESSAGES=en_US.UTF-8   
#>  [7] LC_PAPER=en_US.UTF-8       LC_NAME=C                 
#>  [9] LC_ADDRESS=C               LC_TELEPHONE=C            
#> [11] LC_MEASUREMENT=en_US.UTF-8 LC_IDENTIFICATION=C       
#> 
#> attached base packages:
#> [1] parallel  stats4    stats     graphics  grDevices utils     datasets 
#> [8] methods   base     
#> 
#> other attached packages:
#> [1] dplyr_1.0.1          RcwlPipelines_1.4.5  BiocFileCache_1.12.1
#> [4] dbplyr_1.4.4         Rcwl_1.4.8           S4Vectors_0.26.1    
#> [7] BiocGenerics_0.34.0  yaml_2.2.1           BiocStyle_2.16.0    
#> 
#> loaded via a namespace (and not attached):
#>  [1] httr_1.4.2          tidyr_1.1.1         bit64_4.0.2        
#>  [4] jsonlite_1.7.0      R.utils_2.9.2       shiny_1.5.0        
#>  [7] assertthat_0.2.1    BiocManager_1.30.10 base64url_1.4      
#> [10] blob_1.2.1          progress_1.2.2      pillar_1.4.6       
#> [13] RSQLite_2.2.0       backports_1.1.8     glue_1.4.1         
#> [16] digest_0.6.25       RColorBrewer_1.1-2  promises_1.1.1     
#> [19] checkmate_2.0.0     htmltools_0.5.0     httpuv_1.5.4       
#> [22] R.oo_1.23.0         pkgconfig_2.0.3     bookdown_0.20      
#> [25] DiagrammeR_1.0.6.1  purrr_0.3.4         xtable_1.8-4       
#> [28] brew_1.0-6          later_1.1.0.1       BiocParallel_1.22.0
#> [31] tibble_3.0.3        generics_0.0.2      ellipsis_0.3.1     
#> [34] withr_2.2.0         cli_2.0.2           magrittr_1.5       
#> [37] crayon_1.3.4        mime_0.9            memoise_1.1.0      
#> [40] evaluate_0.14       R.methodsS3_1.8.0   fansi_0.4.1        
#> [43] tools_4.0.2         data.table_1.13.0   prettyunits_1.1.1  
#> [46] hms_0.5.3           lifecycle_0.2.0     stringr_1.4.0      
#> [49] compiler_4.0.2      rlang_0.4.7         debugme_1.1.0      
#> [52] rstudioapi_0.11     rappdirs_0.3.1      htmlwidgets_1.5.1  
#> [55] visNetwork_2.0.9    igraph_1.2.5        rmarkdown_2.3      
#> [58] codetools_0.2-16    DBI_1.1.0           curl_4.3           
#> [61] R6_2.4.1            knitr_1.29          fastmap_1.0.1      
#> [64] bit_4.0.4           utf8_1.1.4          stringi_1.4.6      
#> [67] Rcpp_1.0.5          vctrs_0.3.2         batchtools_0.9.13  
#> [70] tidyselect_1.1.0    xfun_0.16