# (scrape artifact, not Snakefile content): Skip to content
# (scrape artifact, not Snakefile content): Snippets Groups Projects
# (scrape artifact, not Snakefile content): Snakefile 43.7 KiB
# (scrape artifact, not Snakefile content): Newer Older
"""General purpose RNA-Seq analysis pipeline developed by the Zavolan Lab"""
import os
import shutil

import pandas as pd
# Get sample table: one row per sample, indexed by sample name (first column).
# Lines starting with '#' in the TSV are treated as comments and skipped.
samples_table = pd.read_csv(
    config['samples'],   # path to the samples TSV, taken from the workflow config
    header=0,
    index_col=0,         # sample name column becomes the index
    comment='#',
    engine='python',
    sep="\t",
)

def get_sample(column_id, search_id=None, search_value=None):
    """Look up a single value in the samples table.

    Args:
        column_id: Name of the column to read.
        search_id: Optional column to filter on; the special value
            'index' filters on the table index (sample name).
        search_value: Value to match in ``search_id`` (ignored when
            ``search_id`` is None).

    Returns:
        str: The first matching entry of ``column_id``.
    """
    if search_id:
        if search_id == 'index':
            subset = samples_table[column_id][samples_table.index == search_value]
        else:
            subset = samples_table[column_id][samples_table[search_id] == search_value]
        # .iloc[0] takes the first row POSITIONALLY. The previous `[0]` was a
        # label lookup: on a string-indexed Series it relies on a deprecated
        # positional fallback and raises KeyError in recent pandas.
        return str(subset.iloc[0])
    return str(samples_table[column_id].iloc[0])
# Global config
# These rules are cheap bookkeeping steps; run them on the submit host
# instead of dispatching them to cluster nodes.
localrules: start, finish, rename_star_rpm_for_alfa, prepare_multiqc_config
# NOTE(review): this block appears truncated by the page capture — the
# os.makedirs(...) call below is never closed (its closing parenthesis and
# any further arguments, e.g. `exist_ok=True`, are missing). Presumably it
# pre-creates the cluster log directory; confirm against the original file.
if cluster_config:
    os.makedirs(
        os.path.join(
            os.getcwd(),
            os.path.dirname(cluster_config['__default__']['out']),
        ),
# Include subworkflows
include: os.path.join("workflow", "rules", "paired_end.snakefile.smk")
include: os.path.join("workflow", "rules", "single_end.snakefile.smk")
# NOTE(review): the entries below look like the tail of a `rule finish:`
# input section (final workflow targets), but the `rule finish:` header and
# its first entries are missing from this capture — confirm against the
# original Snakefile.
        multiqc_report = os.path.join(
            config['output_dir'],
            "multiqc_summary"),
        # One coverage track per sample / strand / uniqueness class.
        bigWig = expand(
            os.path.join(
                config["output_dir"],
                "samples",
                "{sample}",
                "bigWig",
                "{unique_type}",
                "{sample}_{unique_type}_{strand}.bw"),
            sample=pd.unique(samples_table.index.values),
            strand=["plus", "minus"],
            unique_type=["Unique", "UniqueMultiple"]),
        salmon_merge_genes = expand(
            os.path.join(
                config["output_dir"],
                "summary_salmon",
                "quantmerge",
                "genes_{salmon_merge_on}.tsv"),
            salmon_merge_on=["tpm", "numreads"]),
        salmon_merge_transcripts = expand(
            os.path.join(
                config["output_dir"],
                "summary_salmon",
                "quantmerge",
                "transcripts_{salmon_merge_on}.tsv"),
            salmon_merge_on=["tpm", "numreads"]),
        kallisto_merge_transcripts = os.path.join(
            config["output_dir"],
            "summary_kallisto",
            "transcripts_tpm.tsv"),
        kallisto_merge_genes = os.path.join(
            config["output_dir"],
            "summary_kallisto",
            "genes_tpm.tsv")

rule start:
    '''
       Get samples
    '''
    input:
        # Resolve the raw FASTQ path(s) for this sample/mate from the
        # samples table at DAG-build time.
        reads = lambda wildcards:
            expand(
                pd.Series(
                    samples_table.loc[wildcards.sample, wildcards.mate]
                ).values)

    output:
        reads = os.path.join(
            config["output_dir"],
            "samples",
            "{sample}",
            "start",
            "{sample}.{mate}.fastq.gz")

    log:
        stderr = os.path.join(
            config["log_dir"],
            "samples",
            "{sample}",
            "start_{sample}.{mate}.stderr.log"),
        stdout = os.path.join(
            config["log_dir"],
            "samples",
            "{sample}",
            "start_{sample}.{mate}.stdout.log")

    singularity:
        "docker://bash:5.0.16"

    # NOTE(review): the shell command string is truncated by the page
    # capture — only its trailing redirections survive. Confirm the full
    # command (presumably staging {input.reads} to {output.reads}) against
    # the original Snakefile.
    shell:
        1> {log.stdout} 2> {log.stderr} "


rule fastqc:
    '''
        A quality control tool for high throughput sequence data
    '''
    input:
        reads = os.path.join(
            config["output_dir"],
            "samples",
            "{sample}",
            "start",
            "{sample}.{mate}.fastq.gz")

    output:
        outdir = directory(
            os.path.join(
                config["output_dir"],
                "samples",
                "{sample}",
                "fastqc",
                "{mate}"))

    threads: 2

    singularity:
        "docker://zavolab/fastqc:0.11.9-slim"

    log:
        stderr = os.path.join(
            config["log_dir"],
            "samples",
            "{sample}",
            "fastqc_{mate}.stderr.log"),
        stdout = os.path.join(
            config["log_dir"],
            "samples",
            "{sample}",
            "fastqc_{mate}.stdout.log")

    shell:
        "(mkdir -p {output.outdir}; \
        fastqc --outdir {output.outdir} {input.reads}) \
        1> {log.stdout} 2> {log.stderr}"
    # NOTE(review): truncated section — the rule header (presumably
    # `rule create_index_star:`) and docstring are missing above, the output
    # file names (likely chrNameLength.txt / chrName.txt), the closing of
    # several os.path.join() calls, the `sjdbOverhang` param used in the
    # shell, and the `log:` header are all cut off by the page capture.
    # Code kept verbatim; confirm against the original Snakefile.
    input:
        genome = lambda wildcards:
            get_sample(
                'genome',
                search_id='organism',
                search_value=wildcards.organism),

        gtf = lambda wildcards:
            get_sample(
                'gtf',
                search_id='organism',
                search_value=wildcards.organism)
    output:
        chromosome_info = os.path.join(
            config['star_indexes'],
            "{organism}",
            "{index_size}",
            "STAR_index",
        chromosomes_names = os.path.join(
            config['star_indexes'],
            "{organism}",
            "{index_size}",
            "STAR_index",
    params:
        output_dir = os.path.join(
            config['star_indexes'],
            "{organism}",
            "{index_size}",
        outFileNamePrefix = os.path.join(
            config['star_indexes'],
            "{organism}",
            "{index_size}",
    singularity:
# NOTE(review): the two lines below are GitLab blame chrome captured by the
# scrape, not Snakefile content.
Alex Kanitz's avatar
Alex Kanitz committed
        "docker://zavolab/star:2.7.3a-slim"
    threads: 12
            config['log_dir'],
            "{organism}_{index_size}_create_index_star.stderr.log"),
        stdout = os.path.join(
            config['log_dir'],
            "{organism}_{index_size}_create_index_star.stdout.log")

    shell:
        "(mkdir -p {params.output_dir}; \
        chmod -R 777 {params.output_dir}; \
        STAR \
        --runMode genomeGenerate \
        --sjdbOverhang {params.sjdbOverhang} \
        --genomeDir {params.output_dir} \
        --genomeFastaFiles {input.genome} \
        --runThreadN {threads} \
        --outFileNamePrefix {params.outFileNamePrefix} \
        --sjdbGTFfile {input.gtf}) \
        1> {log.stdout} 2> {log.stderr}"
    """
        Create transcriptome from genome and gene annotations
    """
            get_sample(
                'genome',
                search_id='organism',
                search_value=wildcards.organism),
            get_sample(
                'gtf',
                search_id='organism',
                search_value=wildcards.organism)
            config['output_dir'],
            "transcriptome",
            "{organism}",
            "transcriptome.fa")

    log:
        stderr = os.path.join(
            config['log_dir'],
            "{organism}_extract_transcriptome.log"),
        stdout = os.path.join(
            config['log_dir'],
            "{organism}_extract_transcriptome.log")
        "docker://zavolab/gffread:0.11.7-slim"
    shell:
        "(gffread \
        -w {output.transcriptome} \
        -g {input.genome} {input.gtf}) \
        1> {log.stdout} 2> {log.stderr}"
rule concatenate_transcriptome_and_genome:
    """
        Concatenate genome and transcriptome
    """
    input:
        transcriptome = os.path.join(
            config['output_dir'],
            "transcriptome",
            "{organism}",
            "transcriptome.fa"),

        genome = lambda wildcards:
            get_sample(
                'genome',
                search_id='organism',
                search_value=wildcards.organism)

    output:
        genome_transcriptome = os.path.join(
            config['output_dir'],
            "transcriptome",
            "{organism}",
            "genome_transcriptome.fa")

    singularity:
        "docker://bash:5.0.16"

    log:
        stderr = os.path.join(
            config['log_dir'],
            "{organism}_concatenate_transcriptome_and_genome.stderr.log")

    shell:
        "(cat {input.transcriptome} {input.genome} \
        1> {output.genome_transcriptome}) \
        2> {log.stderr}"


rule create_index_salmon:
    """
        Create index for Salmon quantification
    """
    # NOTE(review): parts of this rule are cut off by the page capture: the
    # intermediate path components of `chr_names` (between the star_indexes
    # root and "STAR_index"), the closing of the `index` output expression,
    # the `params:` section defining `kmerLen` (used in the shell), and the
    # `log:` header with the `stderr =` label. Code kept verbatim; confirm
    # against the original Snakefile.
    input:
        genome_transcriptome = os.path.join(
            config['output_dir'],
            "transcriptome",
            "{organism}",
            "genome_transcriptome.fa"),
        # chrName.txt from the STAR index lists genome sequence names,
        # passed to Salmon as decoy names.
        chr_names = lambda wildcards:
            os.path.join(
                config['star_indexes'],
                "STAR_index",
                "chrName.txt")
    output:
        index = directory(
            os.path.join(
                config['salmon_indexes'],
                "{organism}",
                "{kmer}",
    singularity:
        "docker://zavolab/salmon:1.1.0-slim"
            config['log_dir'],
            "{organism}_{kmer}_create_index_salmon.stderr.log"),
        stdout = os.path.join(
            config['log_dir'],
            "{organism}_{kmer}_create_index_salmon.stdout.log")

    threads: 8
    shell:
        "(salmon index \
        --transcripts {input.genome_transcriptome} \
        --decoys {input.chr_names} \
        --index {output.index} \
        --kmerLen {params.kmerLen} \
        --threads {threads}) \
        1> {log.stdout} 2> {log.stderr}"
            # NOTE(review): truncated section — the rule header (presumably
            # `rule create_index_kallisto:`), its docstring, and the `input:`
            # header with the `transcriptome =` label are missing above this
            # line; the `index` output and `output_dir` param expressions are
            # also cut off mid-call, as is the `log:` header. Code kept
            # verbatim; confirm against the original Snakefile.
            config['output_dir'],
            "transcriptome",
            "{organism}",
            "transcriptome.fa")

    output:
        index = os.path.join(
            config['kallisto_indexes'],
            "{organism}",
    params:
        output_dir = os.path.join(
            config['kallisto_indexes'],
    singularity:
# NOTE(review): the two lines below are GitLab blame chrome captured by the
# scrape, not Snakefile content.
Alex Kanitz's avatar
Alex Kanitz committed
        "docker://zavolab/kallisto:0.46.1-slim"
            config['log_dir'],
            "{organism}_create_index_kallisto.stderr.log"),
        stdout = os.path.join(
            config['log_dir'],
            "{organism}_create_index_kallisto.stdout.log")

    shell:
        "(mkdir -p {params.output_dir}; \
        chmod -R 777 {params.output_dir}; \
        kallisto index -i {output.index} {input.transcriptome}) \
        1> {log.stdout}  2> {log.stderr}"

rule extract_transcripts_as_bed12:
    # NOTE(review): heavily truncated rule — the gtf lambda body, the bed12
    # output file name, the `log:` header with the `stderr =` entry, and the
    # `shell:` keyword are missing from this capture. Code kept verbatim;
    # confirm against the original Snakefile.
    input:
        gtf = lambda wildcards:
    output:
        bed12 = os.path.join(
            config['output_dir'],
    singularity:
        "docker://zavolab/zgtf:0.1"
    threads: 1
        stdout = os.path.join(
            config['log_dir'],
            "extract_transcripts_as_bed12.stdout.log"),
            config['log_dir'],
        "(gtf2bed12 \
        --gtf {input.gtf} \
        --transcript_type protein_coding \
        --bed12 {output.bed12}); \
        1> {log.stdout} 2> {log.stderr}"
rule index_genomic_alignment_samtools:
    '''
        Index genome bamfile using samtools
    '''
    input:
        bam = os.path.join(
            config["output_dir"],
            "samples",
            "{sample}.{seqmode}.Aligned.sortedByCoord.out.bam"),
    output:
        bai = os.path.join(
            config["output_dir"],
            "samples",
            "{sample}.{seqmode}.Aligned.sortedByCoord.out.bam.bai")

    singularity:
        "docker://zavolab/samtools:1.10-slim"

    threads: 1

    log:
        stderr = os.path.join(
            config["log_dir"],
            "samples",
            "index_genomic_alignment_samtools.{seqmode}.stderr.log"),
        stdout = os.path.join(
            config["log_dir"],
            "samples",
            "index_genomic_alignment_samtools.{seqmode}.stdout.log")

    shell:
        "(samtools index {input.bam} {output.bai};) \
        1> {log.stdout} 2> {log.stderr}"


rule calculate_TIN_scores:
    # NOTE(review): truncated rule — the docstring quotes and the `input:`
    # header are missing at the top, the TIN_score output file name
    # (presumably "TIN_score.tsv", cf. merge_TIN_scores) is cut off, the
    # `log:` header is missing before the config['log_dir'] lines, a
    # `params:` section defining `sample` (referenced by the shell) is
    # absent, and the start of the shell command is missing. Code kept
    # verbatim; confirm against the original Snakefile.
        Calculate transcript integrity (TIN) score
        bam = lambda wildcards:
            expand(
                os.path.join(
                    config['output_dir'],
                    "samples",
                    "{sample}",
                    "map_genome",
                    "{sample}.{seqmode}.Aligned.sortedByCoord.out.bam"),
                sample=wildcards.sample,
                seqmode=get_sample(
                    'seqmode',
                    search_id='index',
                    search_value=wildcards.sample)),
        bai = lambda wildcards:
            expand(
                os.path.join(
                    config['output_dir'],
                    "samples",
                    "{sample}",
                    "map_genome",
                    "{sample}.{seqmode}.Aligned.sortedByCoord.out.bam.bai"),
                sample=wildcards.sample,
                seqmode=get_sample(
                    'seqmode',
                    search_id='index',
                    search_value=wildcards.sample)),
        transcripts_bed12 = os.path.join(
            config['output_dir'],
    output:
        TIN_score = os.path.join(
            config['output_dir'],
            "samples",
            "{sample}",
            "TIN",
            config['log_dir'],
            "samples",
            "{sample}",
            "calculate_TIN_scores.log")
    threads: 8
    singularity:
        "docker://zavolab/tin_score_calculation:0.2.0-slim"
        -r {input.transcripts_bed12} \
        -c 0 \
        --names {params.sample} \
        -n 100 > {output.TIN_score};) 2> {log.stderr}"
rule merge_TIN_scores:
    """
        Merge TIN scores tables
    """
    input:
        TIN_score = expand(
            os.path.join(
                config['output_dir'],
                "samples",
                "{sample}",
                "TIN",
                "TIN_score.tsv"),
            sample=pd.unique(samples_table.index.values)),

    output:
        TIN_scores_merged = os.path.join(
            config['output_dir'],
            "TIN_scores_merged.tsv")

    log:
        stderr = os.path.join(
            config['log_dir'],
            "merge_TIN_scores.stderr.log"),
        stdout = os.path.join(
            config["log_dir"],
            "merge_TIN_scores.stdout.log")

    params:
        TIN_score_merged_paths = ",".join(expand(
            os.path.join(
                config['output_dir'],
                "samples",
                "{sample}",
                "TIN",
                "TIN_score.tsv"),
            zip,
            sample=[i for i in pd.unique(samples_table.index.values)],
            seqmode=[get_sample('seqmode',
                    search_id='index',
                    search_value=i) for i in pd.unique(samples_table.index.values)]))

    threads: 1

    singularity:
        "docker://zavolab/tin_score_calculation:0.2.0-slim"

    shell:
        "(tin_score_merge.py \
        --input-files {params.TIN_score_merged_paths} \
        --output-file {output.TIN_scores_merged}) \
        1> {log.stdout} 2> {log.stderr}"


rule plot_TIN_scores:
    """
        Generate TIN scores boxplots
    """
    input:
        TIN_scores_merged = os.path.join(
            config['output_dir'],
            "TIN_scores_merged.tsv"),

    output:
        TIN_boxplot_PNG = os.path.join(
            config['output_dir'],
            "TIN_scores_boxplot_mqc.png"),
        TIN_boxplot_PDF = os.path.join(
            config['output_dir'],
            "TIN_scores_boxplot_mqc.pdf")

    params:
        TIN_boxplot_prefix = os.path.join(
            config['output_dir'],
            "TIN_scores_boxplot_mqc")

    log:
        stderr = os.path.join(
            config['log_dir'],
            "plot_TIN_scores.stderr.log"),
        stdout = os.path.join(
            config["log_dir"],
            "plot_TIN_scores.stdout.log")

    threads: 1

    singularity:
        "docker://zavolab/tin_score_calculation:0.2.0-slim"

    shell:
        "(tin_score_plot.py \
        --input-file {input.TIN_scores_merged} \
        --output-file-prefix {params.TIN_boxplot_prefix}) \
        1> {log.stdout} 2> {log.stderr}"


rule salmon_quantmerge_genes:
    # NOTE(review): truncated rule — the docstring and `input:` header, the
    # `output:` header with its `salmon_out =` label, the os.path.join(
    # opening of the `params.salmon_in` expand, the `sample_name_list` param
    # label, the `log:` header, and the start of the `salmon quantmerge`
    # shell command are missing from this capture. Code kept verbatim;
    # confirm against the original Snakefile.
        salmon_in = expand(
            os.path.join(
                config["output_dir"],
                "samples",
                "{sample}.salmon.{seqmode}",
                "quant.sf"),
            sample=pd.unique(samples_table.index.values),
            seqmode=[get_sample(
                'seqmode',
                search_id='index',
                search_value=i)
                for i in pd.unique(samples_table.index.values)])
            config["output_dir"],
            "summary_salmon",
            "quantmerge",
            "genes_{salmon_merge_on}.tsv")

    params:
        salmon_in = expand(
                "samples",
                "{sample}.salmon.{seqmode}"),
            sample=[i for i in pd.unique(samples_table.index.values)],
            seqmode=[get_sample(
                'seqmode',
                search_id='index',
                search_value=i)
                for i in pd.unique(samples_table.index.values)]),
            sample=pd.unique(samples_table.index.values)),
        salmon_merge_on = "{salmon_merge_on}"
        stderr = os.path.join(
            config["log_dir"],
            "salmon_quantmerge_genes_{salmon_merge_on}.stderr.log"),
        stdout = os.path.join(
            config["log_dir"],
            "salmon_quantmerge_genes_{salmon_merge_on}.stdout.log")

    threads: 1

    singularity:
        "docker://zavolab/salmon:1.1.0-slim"
        --quants {params.salmon_in} \
        --genes \
        --names {params.sample_name_list} \
        --column {params.salmon_merge_on} \
        --output {output.salmon_out};) \
        1> {log.stdout} 2> {log.stderr}"

rule salmon_quantmerge_transcripts:
    # NOTE(review): truncated rule — the docstring quotes and `input:`
    # header, the "quant.sf" entry of the input expand, the `output:` header
    # with its `salmon_out =` label, the os.path.join( opening in `params`,
    # the `sample_name_list` label, the `log:` header, the `singularity:`
    # keyword, and the start of the `salmon quantmerge` shell command are
    # missing from this capture. Code kept verbatim; confirm against the
    # original Snakefile.
        Merge transcript quantifications into a single file
        salmon_in = expand(
            os.path.join(
                config["output_dir"],
                "samples",
                "{sample}.salmon.{seqmode}",
            sample=[i for i in pd.unique(samples_table.index.values)],
            seqmode=[get_sample(
                'seqmode',
                search_id='index',
                search_value=i)
                for i in pd.unique(samples_table.index.values)])
            config["output_dir"],
            "summary_salmon",
            "quantmerge",
            "transcripts_{salmon_merge_on}.tsv")

    params:
        salmon_in = expand(
                "samples",
                "{sample}.salmon.{seqmode}"),
            sample=[i for i in pd.unique(samples_table.index.values)],
            seqmode=[get_sample(
                'seqmode',
                search_id='index',
                search_value=i)
                for i in pd.unique(samples_table.index.values)]),
            sample=pd.unique(samples_table.index.values)),
        salmon_merge_on = "{salmon_merge_on}"
        stderr = os.path.join(
            config["log_dir"],
            "salmon_quantmerge_transcripts_{salmon_merge_on}.stderr.log"),
        stdout = os.path.join(
            config["log_dir"],
            "salmon_quantmerge_transcripts_{salmon_merge_on}.stdout.log")

    threads: 1

        "docker://zavolab/salmon:1.1.0-slim"
        --quants {params.salmon_in} \
        --names {params.sample_name_list} \
        --column {params.salmon_merge_on} \
# NOTE(review): the two lines below are GitLab blame chrome captured by the
# scrape, not Snakefile content.
Dominik Burri's avatar
Dominik Burri committed
        1> {log.stdout} 2> {log.stderr}"


rule kallisto_merge_genes:
    '''
        Merge gene quantifications into single file
    '''
    input:
        # One pseudoalignment per sample; `zip` pairs each sample with its
        # own seqmode instead of taking the cartesian product.
        pseudoalignment = expand(
            os.path.join(
                config["output_dir"],
                "samples",
                "{sample}",
                "quant_kallisto",
                "{sample}.{seqmode}.kallisto.pseudo.sam"),
            zip,
            sample=[i for i in pd.unique(samples_table.index.values)],
            seqmode=[get_sample(
                'seqmode',
                search_id='index',
                search_value=i)
                for i in pd.unique(samples_table.index.values)]),
        gtf = get_sample('gtf')

    output:
        # NOTE(review): the label of the first output (presumably
        # `gn_tpm = os.path.join(config["output_dir"], "summary_kallisto",`)
        # is cut off by the capture — only the trailing file-name line
        # survives. Confirm against the original Snakefile.
            "genes_tpm.tsv"),
        gn_counts = os.path.join(
            config["output_dir"],
            "summary_kallisto",
            "genes_counts.tsv")

    params:
        dir_out = os.path.join(
            config["output_dir"],
            "summary_kallisto"),
        # Comma-separated kallisto abundance.h5 files, one per sample.
        tables = ','.join(expand(
            os.path.join(
                config["output_dir"],
                "samples",
                "{sample}",
                "quant_kallisto",
                "abundance.h5"),
            sample=[i for i in pd.unique(samples_table.index.values)])),
        sample_name_list = ','.join(expand(
            "{sample}",
            sample=pd.unique(samples_table.index.values))),

    log:
        stderr = os.path.join(
            config["log_dir"],
            "kallisto_merge_genes.stderr.log"),
        stdout = os.path.join(
            config["log_dir"],
            "kallisto_merge_genes.stdout.log")

    threads: 1

    singularity:
        "docker://zavolab/merge_kallisto:0.6"

    shell:
        "(merge_kallisto.R \
        --input {params.tables} \
        --names {params.sample_name_list} \
        --txOut FALSE \
        --anno {input.gtf} \
        --output {params.dir_out} \
        --verbose) \
        1> {log.stdout} 2> {log.stderr}"


rule kallisto_merge_transcripts:
    '''
        Merge transcript quantifications into a single files
    '''
    input:
        # One pseudoalignment per sample; `zip` pairs each sample with its
        # own seqmode instead of taking the cartesian product.
        pseudoalignment = expand(
            os.path.join(
                config["output_dir"],
                "samples",
                "{sample}",
                "quant_kallisto",
                "{sample}.{seqmode}.kallisto.pseudo.sam"),
            zip,
            sample=[i for i in pd.unique(samples_table.index.values)],
            seqmode=[get_sample(
                'seqmode',
                search_id='index',
                search_value=i)
                for i in pd.unique(samples_table.index.values)]),

    output:
        # NOTE(review): the label of the first output (presumably
        # `tx_tpm = os.path.join(config["output_dir"], "summary_kallisto",`)
        # is cut off by the capture — only the trailing file-name line
        # survives. Confirm against the original Snakefile.
            "transcripts_tpm.tsv"),
        tx_counts = os.path.join(
            config["output_dir"],
            "summary_kallisto",
            "transcripts_counts.tsv")

    params:
        dir_out = os.path.join(
            config["output_dir"],
            "summary_kallisto"),
        # Comma-separated kallisto abundance.h5 files, one per sample.
        tables = ','.join(expand(
            os.path.join(
                config["output_dir"],
                "samples",
                "{sample}",
                "quant_kallisto",
                "abundance.h5"),
            sample=[i for i in pd.unique(samples_table.index.values)])),
        sample_name_list = ','.join(expand(
            "{sample}",
            sample=pd.unique(samples_table.index.values))),

    log:
        stderr = os.path.join(
            config["log_dir"],
            "kallisto_merge_transcripts.stderr.log"),
        stdout = os.path.join(
            config["log_dir"],
            "kallisto_merge_transcripts.stdout.log")

    threads: 1

    singularity:
        "docker://zavolab/merge_kallisto:0.6"

    shell:
        "(merge_kallisto.R \
        --input {params.tables} \
        --names {params.sample_name_list} \
        --output {params.dir_out} \
        --verbose) \
        1> {log.stdout} 2> {log.stderr}"


rule pca_salmon:
    input:
        tpm = os.path.join(
            config["output_dir"],
            "summary_salmon",
            "quantmerge",
            "{molecule}_tpm.tsv"),

    params:
        tpm_filter = "0",
        tpm_pseudocount = "1"

    output:
        out = directory(os.path.join(
            config["output_dir"],
            "zpca",
            "pca_salmon_{molecule}"))

    log:
        stderr = os.path.join(
            config["log_dir"],
            "pca_salmon_{molecule}.stderr.log"),
        stdout = os.path.join(
            config["log_dir"],
            "pca_salmon_{molecule}.stdout.log")

    threads: 1

    singularity:
        "docker://zavolab/zpca:0.8"

    shell:
        "(zpca-tpm  \
        --tpm {input.tpm} \
        --tpm-filter {params.tpm_filter} \
        --tpm-pseudocount {params.tpm_pseudocount} \
        --out {output.out} \
        --verbose) \
        1> {log.stdout} 2> {log.stderr}"


rule pca_kallisto:
    input:
        tpm = os.path.join(
            config["output_dir"],
            "summary_kallisto",
            "{molecule}_tpm.tsv")

    params:
        tpm_filter = "0",
        tpm_pseudocount = "1"

    output:
        out = directory(os.path.join(
            config["output_dir"],
            "zpca",
            "pca_kallisto_{molecule}"))

    log:
        stderr = os.path.join(
            config["log_dir"],
            "pca_kallisto_{molecule}.stderr.log"),
        stdout = os.path.join(
            config["log_dir"],
            "pca_kallisto_{molecule}.stdout.log")

    threads: 1

    singularity:
        "docker://zavolab/zpca:0.8"

    shell:
        "(zpca-tpm  \
        --tpm {input.tpm} \
        --tpm-filter {params.tpm_filter} \
        --tpm-pseudocount {params.tpm_pseudocount} \
        --out {output.out} \
        --verbose) \
        1> {log.stdout} 2> {log.stderr}"