Revision cbf684a80bb018a2dcc72ba90f80ddebef5461bd authored by isaacovercast on 15 August 2016, 19:25 UTC, committed by isaacovercast on 15 August 2016, 19:25 UTC
1 parent c52c4e0
easySFS.py
#!/usr/bin/env python

'''
this script only retains bi-allelic SNPs.
'''
from __future__ import print_function
import matplotlib
matplotlib.use('PDF')
from collections import Counter
from itertools import combinations
import pandas as pd
import numpy as np
import argparse
import shutil
import copy
import sys
import os

import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

# NOTE(review): this span appears to be the body of a "preview mode" routine
# whose `def` line was lost in this snapshot -- `pops`, `dd`, and `dadi` are
# not defined at module scope, and the original indentation has been
# flattened. Code is left byte-identical; comments only.
msg = """
Running preview mode. We will print out the results for # of segregating sites
for multiple values of projecting down for each population. The dadi
manual recommends maximizing the # of seg sites for projections, but also
a balance must be struck between # of seg sites and sample size.

For each population you should choose the value of the projection that looks
best and then rerun easySFS with the `--proj` flag.
"""
print(msg)
# For each population, project the data down to every sample size from 2 up
# to the population size and report the number of segregating sites.
for pop in pops:
print(pop)
seg_sites = {}
# assumes pops[pop] is the list of individuals in this population -- TODO confirm
for x in range(2,len(pops[pop])):
# Folded 1D SFS (polarized=False) projected down to x samples.
fs =  dadi.Spectrum.from_data_dict(dd, [pop], [x], polarized=False)
# fs.S() is the (possibly fractional, due to projection) count of
# segregating sites; rounded for display.
s = fs.S()
seg_sites[x] = round(s)
print(x, end="\t")
print("")
# Second pass prints the seg-site counts aligned under the projection values.
for x in range(2,len(pops[pop])):
print(seg_sites[x], end="\t")
print("\n")

def dadi_oneD_sfs_per_pop(dd, pops, proj, outdir, prefix):
# Write one 1D site-frequency spectrum per population in fastsimcoal2
# "<pop>_MAFpop0.obs" format under <outdir>/fastsimcoal2.
#
# NOTE(review): this snapshot is incomplete -- the header row is written but
# the line(s) that compute the dadi fs and write its values were lost (only
# the trailing newline write remains). `xrange` is py2-only. Indentation has
# been flattened in this dump; code left byte-identical.
fsc_dir = os.path.join(outdir, "fastsimcoal2")
for i, pop in enumerate(pops):
print("Doing 1D sfs - {}".format(pop))

## Convert each 1D sfs to fsc format
fsc_oneD_filename = os.path.join(fsc_dir, pop+"_MAFpop0.obs")
with open(fsc_oneD_filename, 'w') as outfile:
outfile.write("1 observation\n")
# Column headers d0_0 .. d0_n where n is this pop's projection size.
outfile.write("\t".join(["d0_"+str(x) for x in xrange(proj[i]+1)]) + "\n")
## Grab the fs data from the dadi sfs
outfile.write("\n")

def dadi_twoD_sfs_combinations(dd, pops, proj, outdir, prefix, verbose):
# Build a folded 2D SFS for every pair of populations via dadi and write each
# in fastsimcoal2 joint-MAF format ("<prefix>_jointMAFpop<i>_<j>.obs").
#
# NOTE(review): this snapshot is incomplete -- `row_data` is used but never
# assigned (the line extracting the fs values was lost), an `else:` appears
# without its `if` (the sanity-check condition was lost), and the final write
# references `row_head`, which is never bound. Indentation flattened; code
# left byte-identical.
fsc_dir = os.path.join(outdir, "fastsimcoal2")
## All combinations of pairs of populations
popPairs = list(combinations(pops, 2))
## All combinations of corresponding projection values
## This is hackish, it really relies on the fact that `combinations`
## is generating combinations in the same order for pops and projs
projPairs = list(combinations(proj, 2))
## Make a dict for pop indices. this is a mapping of population labels
## to values (ie. {'pop1':1, 'pop2',2}) for labeling the fsc file names
popidx = {}
for i, pop in enumerate(pops):
popidx[pop] = i
if verbose: print("Population pairs - {}".format(popPairs))
if verbose: print("Projections for each pop pair - {}".format(projPairs))
for i, pair in enumerate(popPairs):
print("Doing 2D sfs - {}".format(pair))
fs = dadi.Spectrum.from_data_dict(dd, list(pair), list(projPairs[i]), polarized=False)

## Convert each 2D sfs to fsc format
## NB: FSC joint format file names look like this: <prefix>_jointMAFpop1_0.obs
## Where the first pop specified is listed in the rows and the second pop
## specified is listed in the columns.
# NOTE(review): `popidx[pair]` indexes the dict with a *tuple* of pop names
# and would KeyError -- presumably popidx[pair[1]], popidx[pair[0]] in the
# working version (truncation) -- TODO confirm.
fsc_twoD_filename = os.path.join(fsc_dir, prefix+"_jointMAFpop{}_{}.obs".format(popidx[pair], popidx[pair]))
with open(fsc_twoD_filename, 'w') as outfile:
outfile.write("1 observation\n")
## Format column headers (i.e. d0_0 d0_1 d0_2 .. d0_n for deme 0 up to sample size of n)
# NOTE(review): `projPairs[i]+1` adds an int to a tuple (TypeError) --
# presumably projPairs[i][1]+1 / projPairs[i][0]+1 originally -- TODO confirm.
outfile.write("\t" + "\t".join(["d{}_".format(popidx[pair]) + str(x) for x in xrange(projPairs[i]+1)]) + "\n")

row_headers = ["d{}_".format(popidx[pair]) + str(x) for x in xrange(projPairs[i]+1)]
## Read in the joint fs from dadi and format it nice for fsc
## Get the second line of the dadi-style sfs which contains the data
## The length of each row is determined by the number of columns which == the size of the projection for pop2
## Have to add 1 to the value of the projection because xrange stops after 'n' elements
## but we want all n+1 elements from 0,1,2,..,n
row_size = projPairs[i] + 1
## Slice the row data into evenly sized chunks based on the number of columns
rows = [row_data[i:i + row_size] for i in xrange(0, len(row_data), row_size)]
## Sanity check. Make sure the number of rows you got is the same number you're expecting
## to get (# rows should == size of pop0 projection)
# NOTE(review): the `if` guarding this failure branch was lost; as written the
# error message and `return` are unconditional.
print("FSC Joint SFS failed for - {}".format(pair))
print("Row data - {}".format(rows))
return
else:
pass
## Write out each row to the file
# NOTE(review): `row_head` is unbound here; the enclosing per-row loop was
# presumably lost in truncation.
outfile.write(row_head + "\t" + " ".join(rows[i]) + "\n")

def dadi_multiSFS(dd, pops, proj, outdir, prefix):
# Build the folded multi-dimensional SFS over *all* populations at once and
# write it in fastsimcoal2 "<prefix>_MSFS.obs" format.
#
# NOTE(review): in this snapshot only the two header lines and a trailing
# newline are written -- the write of the actual fs values appears to have
# been lost. Indentation flattened in this dump; code left byte-identical.
print("Doing multiSFS for all pops")
fsc_dir = os.path.join(outdir, "fastsimcoal2")
# polarized=False -> folded spectrum (no outgroup polarization).
fs = dadi.Spectrum.from_data_dict(dd, pops, proj, polarized=False)

## Convert to fsc multiSFS format
fsc_multi_filename = os.path.join(fsc_dir, prefix + "_MSFS.obs")
with open(fsc_multi_filename, 'w') as outfile:
outfile.write("1 observations. No. of demes and sample sizes are on next line.\n")
# Second header line: number of demes, then each deme's projected size.
outfile.write(str(len(pops)) + "\t" + " ".join([str(x) for x in proj]) + "\n")
outfile.write("\n")

def oneD_sfs_per_pop(dd, pops, outdir, prefix):
    """Print the per-population distribution of allele-count tuples.

    For each population, tally how often each (ref_count, alt_count) pair
    occurs across all sites in the datadict and print the tally.

    Parameters:
        dd (dict): datadict mapping site id -> {"calls": {pop: (ref, alt)}, ...}.
        pops: iterable of population names (keys into each site's "calls").
        outdir, prefix: accepted for interface compatibility; unused here
            (presumably reserved for writing the SFS to disk -- TODO confirm).
    """
    for pop in pops:
        # One (ref_count, alt_count) tuple per site for this population.
        allele_counts = [site["calls"][pop] for site in dd.values()]
        # Counter gives the frequency of each distinct allele-count tuple.
        # (The original computed and printed this twice back to back --
        # accidental line duplication removed.)
        counts = Counter(allele_counts)
        print(pop, counts)

# NOTE(review): this span is the interior of a make_datadict-style function
# whose `def` line was lost -- `genotypes`, `pops`, and `ploidy` are unbound
# here and the span ends with a bare `return dd`. Indentation flattened; code
# left byte-identical. Builds the dadi "data dict": one entry per site with
# per-population (ref_count, alt_count) allele tallies.
dd = {}

## Get genotype counts for each population
for row in genotypes.iterrows():
## iterrows() returns a tuple for some reason
# NOTE(review): `row = row` is a no-op; pandas iterrows() yields
# (index, Series), so the working version presumably did `row = row[1]`
# to keep just the Series -- TODO confirm.
row = row

calls = {}
for pop in pops.keys():
# Genotype strings for just the individuals assigned to this population.
pop_genotypes = [row[x] for x in pops[pop]]
# Homozygote calls contribute `ploidy` alleles each.
ref_count = sum([x == "0/0" or x == "0|0" for x in pop_genotypes]) * ploidy
alt_count = sum([x == "1/1" or x == "1|1" for x in pop_genotypes]) * ploidy
## Haploids shouldn't have hets in the vcf
# NOTE(review): only "1/0"/"1|0" are counted as hets; VCFs commonly
# write "0/1" as well -- possible undercount, verify upstream caller.
het_count = sum([x == "1/0" or x == "1|0" for x in pop_genotypes])

# Each het contributes one ref allele and one alt allele.
ref_count += het_count
alt_count += het_count
calls[pop] = (ref_count, alt_count)

# Key each site by "<chrom>-<pos>"; outgroup is set to REF, consistent
# with the folded (polarized=False) spectra built elsewhere in this file.
dd[row["#CHROM"]+"-"+row["POS"]] =\
{"segregating":[row["REF"], row["ALT"]],\
"calls":calls,\
"outgroup_allele":row["REF"]}
return dd

# NOTE(review): this span is the interior of a read_input-style function whose
# `def` line was lost -- `vcf_name`, `all_snps`, `verbose`, and `header` are
# unbound and the span ends with `return genotypes`. Several lines are missing:
# the file is opened and immediately closed without reading, and `lines` is
# used but never assigned (presumably `lines = infile.readlines()` was lost).
# Indentation flattened; code left byte-identical.
## Counter to track which locus we're evaluating and a list
## to hold all lines for each locus so we can randomly
## select one snp per locus if necessary
cur_loc_number = -1
cur_loc_snps = []

infile = open(vcf_name, 'r')
infile.close()

for line in lines:
# NOTE(review): this `if` has no body in this snapshot -- presumably it
# captured the "#CHROM" header line into `header` (used below) -- TODO confirm.
if line.startswith("#CHROM"):

## Just get the data lines, not the comments
lines = [x for x in lines if not x.startswith('#')]
if verbose:
print("  Number of snps in input file: {}".format(len(lines)))

## Randomly select one snp per locus
if not all_snps:
print("  Sampling one snp per locus")
# NOTE(review): `x.split()` yields a *list* (unhashable, can't go in a set
# or be a dict key) -- the working version presumably used x.split()[0]
# (the CHROM field, i.e. the RAD locus id) -- TODO confirm.
loci_nums = set([x.split() for x in lines])
loc_dict = {}
for loc in loci_nums:
loc_dict[loc] = []

## populate the loci dict
for line in lines:
loc_dict[line.split()].append(line)

# Rebuild `lines` with exactly one randomly chosen snp per locus.
lines = []
for loc in loc_dict.values():
# NOTE(review): np.random.choice(loc, 1) returns a length-1 array, not a
# bare string -- verify downstream split() handles that.
line = np.random.choice(loc, 1)
lines.append(line)

## Sanity check.
## Some snp calling pipelines use the vcf Chrom/pos information to
## all snps to one chrom and use pos/ID (tassel).
## If the user chooses to randomly sample one snp per block and the
## VCF doesn't use Chrom to indicate RAD loci then it'll just
## sample one snp for the whole dataset.
if len(loc_dict) == 1:
msg = """
VCF file uses non-standard Chrom/pos information.
We assume that Chrom indicates RAD loci and pos indicates snps within each locus
The VCF file passed does not have rad locus info in the Chrom field.

You can re-run the easySFS conversion with the `-a` flag to use all snps in the conversion."""
sys.exit(msg)

if verbose:
print("  Using n independent snps: {}".format(len(lines)))

## lines now here has a list of either all snps in the input
## or a subset that includes only one snp per locus
# Columns come from the VCF "#CHROM ..." header row captured above.
genotypes = pd.DataFrame([x.split() for x in lines], columns=header.split())
return genotypes

def get_inds_from_input(vcf_name, verbose):
    """Return the list of sample names from a VCF file's header.

    Scans the header lines (those starting with '#') for the '#CHROM' row;
    per the VCF spec, when genotype data is present the sample IDs follow the
    'FORMAT' column. Scanning stops at the first non-header line.

    Parameters:
        vcf_name (str): path to the input VCF file.
        verbose (bool): accepted for interface compatibility; not used here.

    Returns:
        list: sample names in the order they appear in the VCF header
        (empty if no '#CHROM' row was found before the data lines).

    Raises:
        Re-raises any error hit while reading/parsing, after printing a
        short diagnostic (the original built this message but never
        printed it).
    """
    # Initialized up front so a VCF with no '#CHROM' row returns [] instead
    # of raising a confusing NameError at the `return`.
    indnames = []
    try:
        # Plain 'r' gives universal-newline handling on py3 (the old 'rU'
        # flag is deprecated and removed in py3.11), squashing the
        # windows/mac/linux newline issue the original comment mentions.
        with open(vcf_name, 'r') as infile:
            for line in infile:
                if not line.startswith('#'):
                    # Header section is over -- no need to scan data lines.
                    break
                if line.startswith('#CHROM'):
                    row = line.strip().split()
                    # VCF file format spec says that if the file contains
                    # genotype data then "FORMAT" will be the last column
                    # header before sample IDs start.
                    startcol = row.index('FORMAT')
                    indnames = row[startcol + 1:]
    except Exception as inst:
        print("Problem reading individuals from input VCF file.")
        print("Error - {}".format(inst))
        raise
    return indnames

def check_inputs(ind2pop, indnames):
    """Verify that pops-file samples and VCF samples agree; prompt on mismatch.

    If the two sample sets differ, print the samples unique to each side and
    interactively ask whether to continue (excluding the mismatched samples)
    or bail out.

    Parameters:
        ind2pop (dict): mapping of individual name -> population name
            (from the pops file).
        indnames: iterable of individual names found in the VCF.

    Side effects:
        Calls sys.exit() if the user answers "no".
    """
    pop_set = set(ind2pop.keys())
    vcf_set = set(indnames)

    # Fast path: everything matches, nothing to ask.
    if pop_set == vcf_set:
        return

    print("\nSamples in pops file not present in VCF: {}"
          .format(", ".join(pop_set.difference(vcf_set))))
    print("Samples in VCF not present in pops file: {}"
          .format(", ".join(vcf_set.difference(pop_set))))

    # raw_input() exists only on py2; fall back to input() on py3 so this
    # doesn't NameError under a modern interpreter.
    try:
        read_answer = raw_input
    except NameError:
        read_answer = input
    prompt = "\nContinue, excluding samples not in both pops file and VCF? (yes/no)\n"
    cont = read_answer(prompt)
    # Re-prompt until we get an unambiguous yes/no.
    while cont not in ["yes", "no"]:
        cont = read_answer(prompt)
    if cont == "no":
        sys.exit()

def get_populations(pops_file, verbose=False):
    """Read the individual -> population assignment file.

    The file is plain text with one individual name and one population name
    per line, whitespace-separated, no header.

    Parameters:
        pops_file (str): path to the population assignment file.
        verbose (bool): if True, also print the members of each population.

    Returns:
        tuple: (ind2pop, pops) where ind2pop maps individual -> population
        and pops maps population -> list of its individuals (file order).

    Raises:
        Re-raises any read/parse error after printing a usage example.

    Fixes vs. the snapshot reviewed: the loops iterated an undefined `lines`
    (the readlines() call had been lost), `line.split()` -- a list -- was used
    where the individual/population *fields* were intended, and the
    "File you specified" print had no format placeholder.
    """
    try:
        # Plain 'r' gives universal-newline handling on py3 (the old 'rU'
        # flag is deprecated and removed in py3.11), squashing the
        # windows/mac/linux newline issue.
        with open(pops_file, 'r') as popsfile:
            ind2pop = {}
            pops = {}
            # Skip blank lines so a trailing newline doesn't IndexError below.
            lines = [l for l in popsfile if l.strip()]

            ## Get all the populations
            for line in lines:
                pops.setdefault(line.split()[1], [])

            for line in lines:
                ind = line.split()[0]
                pop = line.split()[1]
                ind2pop[ind] = pop
                pops[pop].append(ind)

        print("Processing {} populations - {}".format(len(pops), pops.keys()))
        if verbose:
            for pop, ls in pops.items():
                print(pop, ls)

    except Exception as inst:
        msg = """
Problem reading populations file. The file should be plain text with one
individual name and one population name per line, separated by any amount of
white space. There should be no header line in this file.
An example looks like this:

ind1    pop1
ind2    pop1
ind3    pop2
ind4    pop2"""
        print(msg)
        print("    File you specified is: {}".format(pops_file))
        print("    Error - {}".format(inst))
        raise

    return ind2pop, pops

def parse_command_line():
# Build and run the argparse CLI for easySFS.
#
# NOTE(review): this snapshot is heavily truncated -- every
# parser.add_argument(...) call has been reduced to just its `help=` keyword
# line, leaving orphaned expressions. Only the parser construction, the
# no-args help path, and the final parse survive intact. From the help
# strings, the flags covered: keep-all-snps, VCF input, populations file,
# projection values, preview mode, output dir, ploidy, output prefix,
# minimum genotype quality (default 20), force-overwrite, and verbosity.
# Indentation flattened; code left byte-identical.
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""\n
""")

help="Keep all snps (default == False)")

help="name of the VCF input file being converted")

help="Input file containing population assignments per individual")

help="List of values for projecting populations down to different sample sizes")

help="Preview the number of segragating sites per population for different projection values.")

help="Directory to write output SFS to")

help="Specify ploidy. Default is 2. Only other option is 1 for haploid.")

help="Prefix for all output SFS files names.")

help="minimum genotype quality tolerated", default=20)

help="Force overwriting directories and existing files.")

help="Set verbosity. Dump tons of info to the screen")

## if no args then return help message
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)

## parse args
args = parser.parse_args()
return args

def init(args):
    """Create the output directory tree and derive the output file prefix.

    Creates args.outdir plus a 'fastsimcoal2' subdirectory, refusing to
    clobber an existing directory unless args.force is set (in which case
    the old tree is removed first).

    Parameters:
        args: parsed CLI namespace; uses .outdir, .force, .prefix,
            .vcf_name, and .verbose.

    Returns:
        tuple: (outdir, prefix)

    Side effects:
        Calls sys.exit() if outdir exists and --force was not given.
    """
    ## Set up output directory and output prefix
    outdir = args.outdir
    if os.path.exists(outdir) and not args.force:
        print("\nOutput directory exists. Use -f to override.\n")
        sys.exit()
    if os.path.exists(outdir):
        shutil.rmtree(outdir)
    os.mkdir(outdir)
    os.mkdir(os.path.join(outdir, "fastsimcoal2"))

    if not args.prefix:
        # Derive the prefix from the VCF file name: basename up to the first
        # dot. (The reviewed snapshot kept the whole .split('.') *list* as
        # the prefix, which would break the string concatenation used to
        # build output file names elsewhere in this file.)
        prefix = args.vcf_name.split('/')[-1].split('.')[0]
    else:
        prefix = args.prefix
    if args.verbose:
        print("Prefix - {}".format(prefix))

    return outdir, prefix

def main():
# Top-level driver: parse args, load populations and VCF samples, build the
# datadict, then either preview projections or write the SFS files.
#
# NOTE(review): this snapshot is truncated -- the `verbose=args.verbose)`
# line below is the orphaned tail of a lost call (presumably
# `genotypes = read_input(args.vcf_name, ..., verbose=args.verbose)`), and
# the function runs past the end of this view mid-comment. Also note
# `outdir`/`prefix` are only bound in the non-preview branch. Indentation
# flattened; code left byte-identical.
args = parse_command_line()

if args.verbose:
print("Input Arguments:\n\t{}".format(args))

## Set up output directory and output prefix
if args.preview:
if args.verbose: print("Doing preview so skipping directory initialization")
else:
outdir, prefix = init(args)

## Get populations and populations assignments for individuals
## ind2pop - a dictionary mapping individuals to populations
## pops - a dictionary of populations and all inds w/in them
ind2pop, pops = get_populations(args.populations, args.verbose)

## Read in the names of individuals present in the vcf file
indnames = get_inds_from_input(args.vcf_name, args.verbose)

## Check whether inds exist in the population mapping and input vcf
## files. Give the user an opportunity to bail if there is a mismatch.
if not args.force:
check_inputs(ind2pop, indnames)

## Reads the vcf and returns a pandas dataframe
verbose=args.verbose)

# Build the dadi-style datadict and dump it for debugging/inspection.
dd = make_datadict(genotypes, pops=pops, ploidy=args.ploidy, verbose=args.verbose)
with open(os.path.join(args.outdir, "datadict.txt"), 'w') as outfile:
for x,y in dd.items():
outfile.write(x+str(y)+"\n")

## Do preview of various projections to determine good values
if args.preview:
sys.exit()

elif args.projections:
## Validate values passed in for projecting
# One comma-separated projection value per population is required.
proj = [int(x) for x in args.projections.split(",")]
if not len(pops) == len(proj):

msg = "You must pass in the same number of values for projection as you have populations specified"
msg += "\n\nN pops = {}\nN projections = {}\nProjections = {}".format(len(pops), len(proj), proj)
sys.exit(msg)

## Create 1D sfs for each population
# NOTE(review): the 1D call itself appears to have been lost here.

## Create pairwise 2D sfs for each population
dadi_twoD_sfs_combinations(dd, pops, proj=proj, outdir=outdir, prefix=prefix, verbose=args.verbose)

## Create the full multiSFS for all popuations combined 