From 1d1d7f8cae7dedc119a4566d6c61c7d66423d1d2 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 18 Feb 2015 15:09:57 -0700 Subject: [PATCH] Split scripts up for now, may add all in one scripts later. Added p4SyncMissingFiles.py, so you don't have to do a force sync and redownload everything. --- README.md | 4 +- p4Helper.py | 398 ++++++++++++++++++++++++++++ p4Helper.pyc | Bin 0 -> 13085 bytes p4RemoveUnversioned.py | 586 ++++++++--------------------------------- p4SyncMissingFiles.py | 136 ++++++++++ 5 files changed, 648 insertions(+), 476 deletions(-) create mode 100644 p4Helper.py create mode 100644 p4Helper.pyc create mode 100644 p4SyncMissingFiles.py diff --git a/README.md b/README.md index 952f42b..5942e43 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -p4RemoveUnversioned +p4Tools =================== Removes unversioned files from perforce repository. Script is in beta, works well, but still going through continued testing. There are a few stats at the end, will be putting in more, like number of files/directories checked, so you have an idea how much work was required. @@ -6,6 +6,6 @@ Removes unversioned files from perforce repository. Script is in beta, works wel Concerning benchmarks: I used to have a HDD, now a SSD. So I can't provide valid comparisons to the old numbers until I do them on a computer with a HDD. That said, this single worker implementation runs faster than the old multi-threaded version. Can't wait to further update it, will only continue to get faster. -This script does parse __.p4ignore__ ignore files, compiles the fields as regex, and scans every directory and file against the local and parent __.p4ignore__ files. This is my first time doing something like this, and I just realized this isn't actually correct; I need to update how things are ignored to follow the [spec](http://www.perforce.com/perforce/r12.1/manuals/cmdref/env.P4IGNORE.html), since it's not straight up regex. 
+~~This script does parse __.p4ignore__ ignore files, compiles the fields as regex, and scans every directory and file against the local and parent __.p4ignore__ files. This is my first time doing something like this, and I just realized this isn't actually correct; I need to update how things are ignored to follow the [spec](http://www.perforce.com/perforce/r12.1/manuals/cmdref/env.P4IGNORE.html), since it's not straight up regex.~~ I need to re-add this to the newer script. **Files are currently permanently deleted, so use this at your own risk.** diff --git a/p4Helper.py b/p4Helper.py new file mode 100644 index 0000000..d50bd63 --- /dev/null +++ b/p4Helper.py @@ -0,0 +1,398 @@ +#!/usr/bin/python +# -*- coding: utf8 -*- +# author : Brian Ernst +# python_version : 2.7.6 and 3.4.0 +# ================================= + +import datetime, inspect, marshal, multiprocessing, optparse, os, re, stat, subprocess, sys, threading + +# trying ntpath, need to test on linux +import ntpath + + +try: input = raw_input +except: pass + + +#============================================================== +re_remove_comment = re.compile( "#.*$" ) +def remove_comment( s ): + return re.sub( re_remove_comment, "", s ) + +def singular_pulural( val, singular, plural ): + return singular if val == 1 else plural + + +#============================================================== +def enum(*sequential, **named): + enums = dict(zip(sequential, range(len(sequential))), **named) + return type('Enum', (), enums) + +MSG = enum('SHUTDOWN', 'PARSE_DIRECTORY', 'RUN_FUNCTION') + +p4_ignore = ".p4ignore" + +main_pid = os.getpid( ) + + +#============================================================== +#if os.name == 'nt' or sys.platform == 'cygwin' +def basename( path ): + # TODO: import based on platform + # https://docs.python.org/2/library/os.path.html + # posixpath for UNIX-style paths + # ntpath for Windows paths + # macpath for old-style MacOS paths + # os2emxpath for OS/2 EMX paths + + #return 
os.path.basename( path ) + return ntpath.basename( path ) + +def normpath( path ): + return ntpath.normpath( path ) + +def join( patha, pathb ): + return ntpath.join( patha, pathb ) + +def splitdrive( path ): + return ntpath.splitdrive( path ) + +#============================================================== +def get_ignore_list( path, files_to_ignore ): + # have to split path and test top directory + dirs = path.split( os.sep ) + + ignore_list = [ ] + + for i, val in enumerate( dirs ): + path_to_find = os.sep.join( dirs[ : i + 1] ) + + if path_to_find in files_to_ignore: + ignore_list.extend( files_to_ignore[ path_to_find ] ) + + return ignore_list + +def match_in_ignore_list( path, ignore_list ): + for r in ignore_list: + if re.match( r, path ): + return True + return False + + +#============================================================== +def call_process( args ): + return subprocess.call( args.split( ), stdout=subprocess.PIPE, stderr=subprocess.PIPE ) + +def try_call_process( args, path=None ): + try: + subprocess.check_output( args.split( ), shell=False, cwd=path ) + return 0 + except subprocess.CalledProcessError: + return 1 + +use_bytearray_str_conversion = type( b"str" ) is not str +def get_str_from_process_stdout( line ): + if use_bytearray_str_conversion: + return ''.join( map( chr, line ) ) + else: + return line + +def parse_info_from_command( args, value, path = None ): + """ + + :rtype : string + """ + proc = subprocess.Popen( args.split( ), stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=path ) + for line in proc.stdout: + line = get_str_from_process_stdout( line ) + + if not line.startswith( value ): + continue + return line[ len( value ) : ].strip( ) + return None + +def get_p4_py_results( args, path = None ): + results = [] + proc = subprocess.Popen( [ 'p4', '-G' ] + args.split( ), stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=path ) + try: + while True: + output = marshal.load( proc.stdout ) + results.append( output ) + except 
EOFError: + pass + finally: + proc.stdout.close() + return results + + +#============================================================== +def fail_if_no_p4(): + if call_process( 'p4 -V' ) != 0: + print( 'Perforce Command-line Client(p4) is required for this script.' ) + sys.exit( 1 ) + +# Keep these in mind if you have issues: +# https://stackoverflow.com/questions/16557908/getting-output-of-a-process-at-runtime +# https://stackoverflow.com/questions/4417546/constantly-print-subprocess-output-while-process-is-running +def get_client_set( path ): + files = set( [ ] ) + + make_drive_upper = True if os.name == 'nt' or sys.platform == 'cygwin' else False + + command = "p4 fstat ..." + + proc = subprocess.Popen( command.split( ), stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=path ) + for line in proc.stdout: + line = get_str_from_process_stdout( line ) + + clientFile_tag = "... clientFile " + if not line.startswith( clientFile_tag ): + continue + + local_path = normpath( line[ len( clientFile_tag ) : ].strip( ) ) + if make_drive_upper: + drive, path = splitdrive( local_path ) + local_path = ''.join( [ drive.upper( ), path ] ) + + files.add( local_path ) + + proc.wait( ) + + for line in proc.stderr: + if "no such file" in line: + continue + raise Exception(line) + + return files + +def get_client_root( ): + """ + + :rtype : string + """ + command = "p4 info" + + proc = subprocess.Popen( command.split( ), stdout=subprocess.PIPE, stderr=subprocess.PIPE ) + for line in proc.stdout: + line = get_str_from_process_stdout( line ) + + clientFile_tag = "Client root: " + if not line.startswith( clientFile_tag ): + continue + + local_path = normpath( line[ len( clientFile_tag ) : ].strip( ) ) + + return local_path + return None + +class P4Workspace: + """ + Use this class when working in a workspace. 
+ Makes sure the environmentals are setup correctly, and that you aren't working on a non-perforce directory accidentally; + otherwise you can delete files that shouldn't be deleted. Ex: + + with P4Workspace( cwd ): #sets current workspace to cwd, or fails + # do stuff here + # on exit reverts to previous set workspace + """ + + def __init__( self, directory): + self.directory = directory + + def __enter__( self ): + # get user + #print("\nChecking p4 info...") + result = get_p4_py_results('info') + if len(result) == 0 or b'userName' not in result[0].keys(): + print("Can't find perforce info, is it even setup?") + sys.exit(1) + username = get_str_from_process_stdout(result[0][b'userName']) + client_host = get_str_from_process_stdout(result[0][b'clientHost']) + #print("|Done.") + + # see if current directory is set to current workspace, if not, set it to current workspace. + client_root = get_client_root() + ldirectory = self.directory.lower() + oldworkspace_name = None + + if client_root is None or not ldirectory.startswith(client_root.lower()): + #print("\nCurrent directory not in client view, checking other workspaces for user '" + username + "' ...") + + oldworkspace_name = parse_info_from_command('p4 info', 'Client name: ') + + # get user workspaces + result = get_p4_py_results('workspaces -u ' + username) + workspaces = [] + for r in result: + whost = get_str_from_process_stdout(r[b'Host']) + if whost is not None and len(whost) != 0 and client_host != whost: + continue + workspace = {'root': get_str_from_process_stdout(r[b'Root']), 'name': get_str_from_process_stdout(r[b'client'])} + workspaces.append(workspace) + + del result + + # check current directory against current workspace, see if it matches existing workspaces. 
+ for w in workspaces: + wname = w['name'] + wlower = w['root'].lower() + if ldirectory.startswith(wlower): + # set current directory, don't forget to revert it back to the existing one + #print("|Setting client view to: " + wname) + + if try_call_process( 'p4 set P4CLIENT=' + wname ): + #print("|There was a problem trying to set the p4 client view (workspace).") + sys.exit(1) + break + else: + # TODO: look up workspace/users for this computer + print( "Couldn't find a workspace root that matches the current directory for the current user." ) + sys.exit(1) + #print("|Done.") + self.oldworkspace_name = oldworkspace_name + return self + + def __exit__( self, type, value, tb ): + # If we changed the current workspace, switch it back. + if self.oldworkspace_name is not None: + c.write("\nReverting back to original client view...") + # set workspace back to the original one + if try_call_process( 'p4 set P4CLIENT=' + self.oldworkspace_name ): + # error_count += 1 # have console log errors + # if not options.quiet: + print("There was a problem trying to restore the set p4 client view (workspace).") + sys.exit(1) + #else: + # if not options.quiet: + # c.write("|Reverted client view back to '" + self.oldworkspace_name + "'.") + #if not options.quiet: + # c.write("|Done.") + +#============================================================== +class PTable( list ): + def __init__( self, *args ): + list.__init__( self, args ) + self.mutex = multiprocessing.Semaphore( ) + +class PDict( dict ): + def __init__( self, *args ): + dict.__init__( self, args ) + self.mutex = multiprocessing.Semaphore( ) + + +#============================================================== +# TODO: Create a child thread for triggering autoflush events +# TODO: Hook console into stdout so it catches print +class Console( threading.Thread ): + MSG = enum('WRITE', 'FLUSH', 'SHUTDOWN', 'CLEAR' ) + + # auto_flush_time is time in milliseconds since last flush to trigger a flush when writing + def __init__( 
self, auto_flush_num = None, auto_flush_time = None ): + threading.Thread.__init__( self ) + self.buffers = {} + self.buffer_write_times = {} + self.running = True + self.queue = multiprocessing.JoinableQueue( ) + self.auto_flush_num = auto_flush_num if auto_flush_num is not None else -1 + self.auto_flush_time = auto_flush_time * 1000 if auto_flush_time is not None else -1 + self.shutting_down = False + + def write( self, data, pid = None ): + self.queue.put( ( Console.MSG.WRITE, pid if pid is not None else os.getpid(), data ) ) + + def writeflush( self, data, pid = None ): + pid = pid if pid is not None else os.getpid() + self.queue.put( ( Console.MSG.WRITE, pid, data ) ) + self.queue.put( ( Console.MSG.FLUSH, pid ) ) + + def flush( self, pid = None ): + self.queue.put( ( Console.MSG.FLUSH, pid if pid is not None else os.getpid() ) ) + + def clear( self, pid = None ): + self.queue.put( ( Console.MSG.CLEAR, pid if pid is not None else os.getpid() ) ) + + def __enter__( self ): + self.start( ) + return self + + def __exit__( self, type, value, tb ): + self.queue.put( ( Console.MSG.SHUTDOWN, ) ) + self.queue.join( ) + + def run( self ): + while True: + data = self.queue.get( ) + event = data[0] + + if event == Console.MSG.SHUTDOWN: + # flush remaining buffers before shutting down + for ( pid, buffer ) in self.buffers.items( ): + for line in buffer: + print( line ) + self.buffers.clear( ) + self.buffer_write_times.clear( ) + self.queue.task_done( ) + + #print(self.queue.qsize()) + #print(self.queue.empty()) + break + + elif event == Console.MSG.WRITE: + pid, s = data[ 1 : ] + + if pid not in self.buffers: + self.buffers[ pid ] = [] + if pid not in self.buffer_write_times: + self.buffer_write_times[ pid ] = datetime.datetime.now( ) + self.buffers[ pid ].append( s ) + + if self.auto_flush_num >= 0 and len( self.buffers[ pid ] ) >= self.auto_flush_num: + self.flush( pid ) + elif self.auto_flush_time >= 0 and ( datetime.datetime.now( ) - self.buffer_write_times[ pid ] 
).microseconds >= self.auto_flush_time: + self.flush( pid ) + # TODO: if buffer is not empty and we don't auto flush on write, sleep until a time then auto flush according to auto_flush_time + elif event == Console.MSG.FLUSH: + pid = data[ 1 ] + if pid in self.buffers: + for line in self.buffers[ pid ]: + print( line ) + self.buffers.pop( pid, None ) + self.buffer_write_times[ pid ] = datetime.datetime.now( ) + elif event == Console.MSG.CLEAR: + pid = data[ 1 ] + if pid in self.buffers: + self.buffers.pop( pid, None ) + + self.queue.task_done( ) + + +#============================================================== +# class Task( threading.Event ): +# def __init__( data, cmd = None ): +# threading.Event.__init__( self ) + +# self.cmd = cmd if cmd is None MSG.RUN_FUNCTION +# self.data = data + +# def isDone( self ): +# return self.isSet() + +# def join( self ): +# self.wait( ) + + +#============================================================== +class Worker( threading.Thread ): + def __init__( self, console, queue, commands ): + threading.Thread.__init__( self ) + + self.queue = queue + self.commands = commands + + def run( self ): + while True: + ( cmd, data ) = self.queue.get( ) + if not self.commands[cmd](data): + self.queue.task_done( ) + break + self.queue.task_done( ) diff --git a/p4Helper.pyc b/p4Helper.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4884754adb315b7046b77cc97aafc988f1eac1b GIT binary patch literal 13085 zcmcgyO>i8?b?%v6{4B5_Kma5NiWbKtC4m$P%4JA$Xj!HNfIqghga)8jMv)p1X9mOo zyEBXFSpskXmnE_sIkBC;N}S5CQmLeJNK%zUPR>15IpmN-ZmyhCm2+~B^L?-PPb8d# zgcXR}dehTCuV26Sz1Od&@Sh_?b@gv6folB~@&Bv%X=N)F;-95zO09NiORZY%dRwj9 zZMvXV3vIfnR*P-APp$T~>3+4^-=+uD>Oh+wRI7t+x};W1ZF)$p4z=lFwK}YJhg4Kn zt0StK3@FR85=*@w^{b5|Dzw!@ur{i~f_emg$5dDp;;0Jygcw)RxC${prX5rd!S93$ zG25hy`qaiT^$@*|t8hpfPpNQNh!ZL-3o)(25g|^h@Q4u4sc=+?Qz{%2;&YT%cvL+C zC#O~9sBoNIMW0ujnCOfOC#2a~X*Ma%&Ix@?=oeIYTpGNn!YLtMQsD_9W>g3U(D5W6 z!Rj|x&#Uk`6EoR_ooa9BWRD3Ge)g;FyGHsNy? 
z<)n4-8wfXE<`sLeHR3Qs|dKUAuB;DN8q^AlG*qbFTEdojiy# z9j8eYqV7giYed;>V>ds7_MaY?JZdtatbh#g$H#g6^dt%}4c0epm5r%AEPc;zuOKEj zh8gw}{vY_Ye2`t^q zC-=?1)v!fulJ7Pmv#CRP($Q8EFJcIV%@e;Kg+fIb{4o2-sW`@SoP-a`I%kRCmv7u& zS-4fLud>wItMp4NVUp@P#yC*ONyPHieJso%M6j>ic zftA&T6KbnFjp7hIokaI9;@4I@N{T-XS=oM!be!n?ugc-~VY(4SS-wjarW;1Ui)z2G)pVms>)jJeo^oL0`~7MU{{UM3 zDtkfg71Tx_A5l4t*Vfhx3jeC8Z$YsEGJ=O}f#%u;lv8a$Icnsz6k1%A`FZ40%3=V~ zP97!U4A&wu60=*2YmxTy)H8O~!j4Sm!)RveyjrYtf%DjviD{bLdv;rkF(jU!5#(w4 zfTh;#GSgR3r3wDZ9=H4LY3rn2woceVTX(6EvJZ49JLf2^g}bK&A&vP%z`##9fdRtl z^45T=LNsmFW_OobCi1KMd9W^vUdft~R9*9H+Tfx{$$gP+*&#c`Iw#Rz=JMjCcd=hb zy&goNtPyM68nimlK}pLpUjWfDQ>vRdIo2RngIf^dp=Gxn0z2wFOq;pvI>;!?X1MHJ ztfhIck)=VTwJbaEYc=;YU$Vv0&85XIDEis`!vH_kgu{ES`IhnAHg#?$f1Oowsk<*7 zv`|?k;mNWs1cuAiAq_Q^|Lxx4Q{C=pg$+XgnF^%=8Ruh{Dw!;|J%GeHD zxL-gaE+$xyf=v&?YBY0^gL{#$#!<|J_b5cXLSZdtS(*t5x^o=haNwK^_GY_YuYA6N zaW%I{Sq1AfbYaXgCY@&;t>7k!R-6L?A!LYK0YVbBp|MeX0LL<@Wf2IaTyaq-qXVZv zHFf0O+sz|C%lutW=NTfmb`)mtqJUO)zhT6;o^^Gi7AMgm?0{fId*_^MSz2%F1}dnr zAKe)O zu+7LoIrV~O=6)5#A@+x%NW;&dA91nm6^?Txb) zcE;?ZwRKHZtI1JdLP-GOVf#>71;?h>#t^sBdLLS`JVW)Wh|6ru2^?Lz{@Aw7(D%u% zxo?poJi4-BbUDxt+D4|=5h_#ewbTz~1i@Rsfjtf-oLjtgO<+SdE2yP#D*`@q9FG7Z zLuHX})^hD$V+ENrV04I0u3Im9{#&<%RFKM=qxlGtKSFmN)Twy zo%#YfglD3>5P`fxW0E?$8LT@DYiD>WGYS*suEPhKD-V9|dwk-SHfnwjovXW4ck%rP8ouEA zRW^B>1-*cPv`dw6d1Nqtlj`3D0%S6;ABHmJwjbx>6(lrY+zAk*BjGVay)r<+eiDjH zWKj1vBTr5=o*9gg(}H2a34wXZkaVTs<^Fx~BefJ!g~tG0fXAGSFu*g0=tt0#Xkupa zbd-M|H6}2G7r1~J-S~rk3m(Bf1$Y^^p0|(MLpHnwp2qBevvY}rH%d%%2$&4ij_}{> z&D#+zjKz2c#C@=`o!g><^y6O>*8+cG)Dw1v6PCDq<0fbbVauDtqR>*kGOH@hHO!f8KFNs5MZJ^y}$?0!tu9{|>y5R?rY#^rx_909bX<~ z-dRUFybF7PM`&j37dcV#AkNZ+sYaaD93R!t%4Wj}(hRAgytaGMLBN8xK#<+E$ri~= zd1sE4b0leU33h5cQwZM^d!h@5Pc!MC{n2y5ZxP~MLh#cdx_ZS<~y#QS`17E zsjC0TX2aZXOl<~QQ9YTh{#lM0R zpaBqrrYkyH#}5Jk!HN(BPKl=HI!wSXuds z=9~F8I0)Z8@FqUPAc!P~0CL2)HXd}Wt`!w`-%uvAGemWFi3J%K#x|o}Ey0b;n8>}# z0&1$nQlSbwt!zh`yTCdExqF?(*I8UaAs0GnE$m3alRP=cccHN6fMQM*5nn4uL93RNguGYI|1tVwIKSdysbOkuPzhFE3 
zG;nNX`=E6JqYk+O`1G6i{9u!~pJws>IPq((@Ys0}N85;&4_3~b7-NNg#M$=YN8vQm zd$p+UFA#BkVZxJBc3so2nOv#T<&Gt=3i^R1KG0EU#^6%~_h3(U4(B$)$$B%7c1$XikRbV}u3(;8CY10|tfh4~IA+P6qzjgniv=rT ztvmj+3I2N+XpECkVEvY?V`%|*qye7gkJ2O{>d#i(G zZ@81~7xV8dzU4moLBWT7FNae^lt5>qaBS?TpjrfSHwf8)76|X!5h5QEy2-(aX#Wmr z8B*7RibrBOQ^)=k}5{cexMy>lWd=`K# z^8y<%4Dg%0^{X}Yx`!)9fsCFi4sUKyIGA3?B@~7SmTo7;tOYTCjCbzuq3D|RGr`6g zDj>ry1vgxh|3 zH7F=HUG@PpA)P;n(*0c&lHTE!(GS`W?=D|=f5_LAl*u-6V{qzj#9?>sg1DiN+!lA1 zukgQTJJq}2MbC?E8&n4@`YsxDwGGKVvqW=iEg5=>L?i({M)m|tN1N`_AAgNJimlcp z@6s!l66Wcy+xd|3tfQ4_UOWsEAA-5<4fe~9uq|mvLAAt1XNk#T+gomzb^0B zKkMZMHQY+R-m^TAl6B($#gYj^X7^yF7N~YW!xnI?W0007c20;yctio%K-%&eX6d|<1bK&k8 z`Sb-B9=OApU}|LH?l6-l$RAw4u~k*otp(g6U;=cJ+=9re)cb&`&Ar`H@)N5&<*=F3 zB0IJ^eFw=EOd}VBW;~{T{)P85<`HRGY}WT?;w-&d|&s@nN*}jx?@TjOW-|5*gx?Rnv9JK5*LU6S#6= z$8j0Nbm|=>`ALt+CuRL*+pK?@nQybSez|j5vUg7Qu=gpk{}ZlWWO=x`K_@7r91#u_ z1$a*udZwL{X~CO_{eWo?7@1=j@pkrdtDvf42voTW!l?FV8N949moOdw3g6k|Jj679 z3$4V)0rqw6{wX{CHtXnua$j#E)JP(6G;+j)r;Qksh__ABZIkJc_=7m&e!_y0h
36Y!S=sFyRMj?T0)!<7pCo4Mv_Yu
z0d=zDeyVubS>nPnSIoW**u+IiZPPBXKp;0(3$)U2x7#7J1dUqiny9Jhm?$T!m^-PL
zL7GMV87Z#J{c{xEKu&ZusD~2J9b(ngt&h3tG6?GXF=+3IRkVzx!EsMo;1{Bae~lqO
zXST1Mi1^Hm`S;nlUdMt+aY
zMTe%?hbWc#+qm+;*}>qLQ1rpjs0MDWU2d<-NY7%m@M3zl!(5H;t>F$poZ|lO0hU= 0 and len( self.buffers[ pid ] ) >= self.auto_flush_num:
-                    self.flush( pid )
-                elif self.auto_flush_time >= 0 and ( datetime.datetime.now( ) - self.buffer_write_times[ pid ] ).microseconds >= self.auto_flush_time:
-                    self.flush( pid )
-                # TODO: if buffer is not empty and we don't auto flush on write, sleep until a time then auto flush according to auto_flush_time
-            elif event == Console.MSG.FLUSH:
-                pid = data[ 1 ]
-                if pid in self.buffers:
-                    for line in self.buffers[ pid ]:
-                        print( line )
-                    self.buffers.pop( pid, None )
-                    self.buffer_write_times[ pid ] = datetime.datetime.now( )
-            elif event == Console.MSG.CLEAR:
-                pid = data[ 1 ]
-                if pid in self.buffers:
-                    self.buffers.pop( pid, None )
-
-            self.queue.task_done( )
-
-# class Task( threading.Event ):
-    # def __init__( data, cmd = None ):
-        # threading.Event.__init__( self )
-
-        # self.cmd = cmd if cmd is None MSG.RUN_FUNCTION
-        # self.data = data
-
-    # def isDone( self ):
-        # return self.isSet()
-
-    # def join( self ):
-        # self.wait( )
-
-class Worker( threading.Thread ):
-    def __init__( self, console, queue, files_to_ignore ):
-        threading.Thread.__init__( self )
-
-        self.console = console
-        self.queue = queue
-        self.files_to_ignore = files_to_ignore
-
-    def run( self ):
-        while True:
-            ( cmd, data ) = self.queue.get( )
-
-            if cmd == MSG.SHUTDOWN:
-                self.console.flush( )
-                self.queue.task_done( )
-                break
-
-            if cmd == MSG.RUN_FUNCTION:
-                break
-
-            if cmd != MSG.PARSE_DIRECTORY or data is None:
-                self.console.flush( )
-                self.queue.task_done( )
-                continue
-
-            directory = data
-
-            # add threading stuffs
-
-            self.queue.task_done( )
-
+#==============================================================
 def main( args ):
     start = time.clock()
 
-    # check requirements
-    if call_process( 'p4 -V' ) != 0:
-        print( 'Perforce Command-line Client(p4) is required for this script.' )
-        sys.exit( 1 )
+    fail_if_no_p4()
 
     #http://docs.python.org/library/optparse.html
     parser = optparse.OptionParser( )
@@ -336,184 +35,123 @@ def main( args ):
 
     directory = normpath( options.directory if options.directory is not None else os.getcwd( ) )
 
-    # get user
-    print("\nChecking p4 info...")
-    result = get_p4_py_results('info')
-    if len(result) == 0 or b'userName' not in result[0].keys():
-        print("Can't find perforce info, is it even setup?")
-        sys.exit(1)
-    username = get_str_from_process_stdout(result[0][b'userName'])
-    client_host = get_str_from_process_stdout(result[0][b'clientHost'])
-    print("|Done.")
-
-    client_root = get_client_root()
-    ldirectory = directory.lower()
-    workspace_name = None
-    if client_root is None or not ldirectory.startswith(client_root.lower()):
-        print("\nCurrent directory not in client view, checking other workspaces for user '" + username + "' ...")
-
-        workspace_name = parse_info_from_command('p4 info', 'Client name: ')
-
-        # get user workspaces
-        result = get_p4_py_results('workspaces -u ' + username)
-        workspaces = []
-        for r in result:
-            whost = get_str_from_process_stdout(r[b'Host'])
-            if whost is not None and len(whost) != 0 and client_host != whost:
-                continue
-            workspace = {'root': get_str_from_process_stdout(r[b'Root']), 'name': get_str_from_process_stdout(r[b'client'])}
-            workspaces.append(workspace)
-
-        del result
-
-        # check current directory against current workspace, see if it matches existing workspaces.
-        for w in workspaces:
-            wname = w['name']
-            wlower = w['root'].lower()
-            if ldirectory.startswith(wlower):
-                # set current directory, don't forget to revert it back to the existing one
-                print("|Setting client view to: " + wname)
-
-                if try_call_process( 'p4 set P4CLIENT=' + wname ):
-                    print("|There was a problem trying to set the p4 client view (workspace).")
-                    sys.exit(1)
-                break
-        else:
-            print( "|Couldn't find a workspace root that matches the current directory for the current user." )
-            sys.exit(1)
-        print("|Done.")
-
-
-    # Files are added from .p4ignore
-    # Key is the file root, the value is the table of file regexes for that directory.
-    files_to_ignore = PDict()
-
-    processed_file_count = 0
-    processed_directory_count = 0
-    
-    remove_file_count = 0
-    remove_dir_count = 0
-    warning_count = 0
-    error_count = 0
-
     with Console( auto_flush_num=20, auto_flush_time=1000 ) as c:
-        if not options.quiet:
-            c.writeflush( "\nCaching files in depot, this may take a little while..." )
+        with P4Workspace( directory ):
+            # Files are added from .p4ignore
+            # Key is the file root, the value is the table of file regexes for that directory.
+            files_to_ignore = PDict()
 
-        # TODO: push this off to a thread and walk the directory so we get a headstart.
-        files_in_depot = get_client_set( directory )
-
-        c.writeflush( "|Done." )
-
-        # TODO: push a os.walk request off to a thread to build a list of files in the directory; create batch based on directory?
-
-        # TODO: at this point join on both tasks to wait until they're done
-
-        # TODO: kick off file removal, make batches from the files for threads to work on since testing has to be done for each.
-        #       need to figure out the best way to do this since the ignore list needs to be properly built for each directory;
-        #       will at least need to redo how the ignore lists are handled for efficiencies sake.
-
-        if not options.quiet:
-            c.writeflush( "\nChecking " + directory)
-        for root, dirs, files in os.walk( directory ):
-            ignore_list = get_ignore_list( root, files_to_ignore )
-
-            if not options.quiet:
-                c.write( "|Checking " + os.path.relpath( root, directory ) )
-
-            for d in dirs:
-                processed_directory_count += 1
-                path = join( root, d )
-                rel_path = os.path.relpath( path, directory )
-
-                if match_in_ignore_list( path, ignore_list ):
-                    # add option of using send2trash
-                    if not options.quiet:
-                        c.write( "| ignoring " + rel_path )
-                    dirs.remove( d )
-
-            for f in files:
-                processed_file_count += 1
-                path = normpath( join( root, f ) )
-
-                if path not in files_in_depot:
-                    if not options.quiet:
-                        c.write( "| " + f + " is unversioned, removing it." )
-                    try:
-                        os.chmod( path, stat.S_IWRITE )
-                        os.remove( path )
-                        remove_file_count += 1
-                    except OSError as ex:
-                        c.writeflush( "|  " + type( ex ).__name__ )
-                        c.writeflush( "|  " + repr( ex ) )
-                        c.writeflush( "|  ^ERROR^" )
-
-                        error_count += 1
-        if not options.quiet:
-            c.write( "|Done." )
-
-        if not options.quiet:
-            c.write( os.linesep + "Removing empty directories...")
-        # remove empty directories in reverse order
-        for root, dirs, files in os.walk( directory, topdown=False ):
-            ignore_list = get_ignore_list( root, files_to_ignore )
-
-            for d in dirs:
-                processed_directory_count += 1
-                path = os.path.join( root, d )
-                rel_path = os.path.relpath( path, directory )
-
-                if match_in_ignore_list( path, ignore_list ):
-                    # add option of using send2trash
-                    if not options.quiet:
-                        c.write( "| ignoring " + rel_path )
-                    dirs.remove( d )
-                try:
-                    os.rmdir(path)
-                    remove_dir_count += 1
-                    if not options.quiet:
-                        c.write( "| " + rel_path + " was removed." )
-                except OSError:
-                    # Fails on non-empty directory
-                    pass
-        if not options.quiet:
-            c.write( "|Done." )
-
-        # This needs to happen automatically even when an exception happens, when we leave scope.
-        if workspace_name is not None:
-            c.write("\nReverting back to original client view...")
-            # set workspace back to the original one
-            if try_call_process( 'p4 set P4CLIENT=' + workspace_name ):
-                error_count += 1
-                if not options.quiet:
-                    c.write("|There was a problem trying to restore the set p4 client view (workspace).")
-            else:
-                if not options.quiet:
-                    c.write("|Reverted client view back to '" + workspace_name + "'.")
-            if not options.quiet:
-                c.write("|Done.")
-
-        if not options.quiet:
-            output = "\nChecked " + str( processed_file_count ) + singular_pulural( processed_file_count, " file, ", " files, " )
-            output += str( processed_directory_count ) + singular_pulural( processed_directory_count, " directory", " directories")
+            processed_file_count = 0
+            processed_directory_count = 0
             
-            output += "\nRemoved " + str( remove_file_count ) + singular_pulural( remove_file_count, " file, ", " files, " )
-            output += str( remove_dir_count ) + singular_pulural( remove_dir_count, " directory", " directories")
+            remove_file_count = 0
+            remove_dir_count = 0
+            warning_count = 0
+            error_count = 0
 
-            if warning_count > 0:
-                output += " w/ " + str( warning_count ) + singular_pulural( warning_count, " warning", " warnings" )
-            if error_count > 0:
-                output += " w/ " + str( error_count ) + singular_pulural( error_count, " error", " errors" )
+            if not options.quiet:
+                c.writeflush( "\nCaching files in depot, this may take a little while..." )
 
-            end = time.clock()
-            delta = end - start
-            output += "\nFinished in " + str(delta) + "s"
+            # TODO: push this off to a thread and walk the directory so we get a headstart.
+            files_in_depot = get_client_set( directory )
 
-            c.write( output )
+            c.writeflush( "|Done." )
+
+            # TODO: push an os.walk request off to a thread to build a list of files in the directory; create batch based on directory?
+
+            # TODO: at this point join on both tasks to wait until they're done
+
+            # TODO: kick off file removal, make batches from the files for threads to work on since testing has to be done for each.
+            #       need to figure out the best way to do this since the ignore list needs to be properly built for each directory;
+            #       will at least need to redo how the ignore lists are handled for efficiencies sake.
+
+            if not options.quiet:
+                c.writeflush( "\nChecking " + directory)
+            for root, dirs, files in os.walk( directory ):
+                ignore_list = get_ignore_list( root, files_to_ignore )
+
+                if not options.quiet:
+                    c.write( "|Checking " + os.path.relpath( root, directory ) )
+
+                for d in dirs:
+                    processed_directory_count += 1
+                    path = join( root, d )
+                    rel_path = os.path.relpath( path, directory )
+
+                    if match_in_ignore_list( path, ignore_list ):
+                        # add option of using send2trash
+                        if not options.quiet:
+                            c.write( "| ignoring " + rel_path )
+                        dirs.remove( d )
+
+                for f in files:
+                    processed_file_count += 1
+                    path = normpath( join( root, f ) )
+
+                    if path not in files_in_depot:
+                        if not options.quiet:
+                            c.write( "| " + f + " is unversioned, removing it." )
+                        try:
+                            os.chmod( path, stat.S_IWRITE )
+                            os.remove( path )
+                            remove_file_count += 1
+                        except OSError as ex:
+                            c.writeflush( "|  " + type( ex ).__name__ )
+                            c.writeflush( "|  " + repr( ex ) )
+                            c.writeflush( "|  ^ERROR^" )
+
+                            error_count += 1
+            if not options.quiet:
+                c.write( "|Done." )
+
+            if not options.quiet:
+                c.write( os.linesep + "Removing empty directories...")
+            # remove empty directories in reverse order
+            for root, dirs, files in os.walk( directory, topdown=False ):
+                ignore_list = get_ignore_list( root, files_to_ignore )
+
+                for d in dirs:
+                    processed_directory_count += 1
+                    path = os.path.join( root, d )
+                    rel_path = os.path.relpath( path, directory )
+
+                    if match_in_ignore_list( path, ignore_list ):
+                        # add option of using send2trash
+                        if not options.quiet:
+                            c.write( "| ignoring " + rel_path )
+                        dirs.remove( d )
+                    try:
+                        os.rmdir(path)
+                        remove_dir_count += 1
+                        if not options.quiet:
+                            c.write( "| " + rel_path + " was removed." )
+                    except OSError:
+                        # Fails on non-empty directory
+                        pass
+            if not options.quiet:
+                c.write( "|Done." )
+
+            if not options.quiet:
+                output = "\nChecked " + str( processed_file_count ) + singular_pulural( processed_file_count, " file, ", " files, " )
+                output += str( processed_directory_count ) + singular_pulural( processed_directory_count, " directory", " directories")
+                
+                output += "\nRemoved " + str( remove_file_count ) + singular_pulural( remove_file_count, " file, ", " files, " )
+                output += str( remove_dir_count ) + singular_pulural( remove_dir_count, " directory", " directories")
+
+                if warning_count > 0:
+                    output += " w/ " + str( warning_count ) + singular_pulural( warning_count, " warning", " warnings" )
+                if error_count > 0:
+                    output += " w/ " + str( error_count ) + singular_pulural( error_count, " error", " errors" )
+
+                end = time.clock()
+                delta = end - start
+                output += "\nFinished in " + str(delta) + "s"
+
+                c.write( output )
 
 if __name__ == "__main__":
     try:
         main( sys.argv )
     except:
-        print( "Unexpected error!" )
+        print( "\nUnexpected error!" )
         traceback.print_exc( file = sys.stdout )
\ No newline at end of file
diff --git a/p4SyncMissingFiles.py b/p4SyncMissingFiles.py
new file mode 100644
index 0000000..f317737
--- /dev/null
+++ b/p4SyncMissingFiles.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# -*- coding: utf8 -*-
+# author              : Brian Ernst
+# python_version      : 2.7.6 and 3.4.0
+# =================================
+
+# TODO: setup batches before pushing to threads and use p4 --parallel
+# http://www.perforce.com/perforce/r14.2/manuals/cmdref/p4_sync.html
+
+from p4Helper import *
+
+import time, traceback
+
+
+#==============================================================
+def main( args ):
+    start = time.clock()
+
+    fail_if_no_p4()
+
+     #http://docs.python.org/library/optparse.html
+    parser = optparse.OptionParser( )
+
+    parser.add_option( "-d", "--dir", dest="directory", help="Desired directory to crawl.", default=None )
+    parser.add_option( "-t", "--threads", dest="thread_count", help="Number of threads to crawl your drive and poll p4.", default=100 )
+    parser.add_option( "-q", "--quiet", action="store_true", dest="quiet", help="This overrides verbose", default=False )
+    parser.add_option( "-v", "--verbose", action="store_true", dest="verbose", default=False )
+    parser.add_option( "-i", "--interactive", action="store_true", dest="interactive", default=False )
+
+    ( options, args ) = parser.parse_args( args )
+
+    directory = normpath( options.directory if options.directory is not None else os.getcwd( ) )
+
+    with Console( auto_flush_num=20, auto_flush_time=1000 ) as c:
+        with P4Workspace( directory ):
+            if not options.quiet:
+                c.writeflush( "Preparing to sync missing files..." )
+                c.write( " Setting up threads..." )
+
+            # Setup threading
+            WRK = enum( 'SHUTDOWN', 'SYNC' )
+
+            def shutdown( data ):
+                return False
+            def sync( data ):
+                if data is not None and not os.path.exists( data ):
+                    try_call_process( "p4 sync -f " + data )
+                    if not options.quiet:
+                        c.write( " Synced " + data )
+                return True
+
+            commands = {
+                WRK.SHUTDOWN : shutdown,
+                WRK.SYNC : sync
+            }
+
+            threads = [ ]
+            thread_count = options.thread_count if options.thread_count > 0 else multiprocessing.cpu_count( ) + threads
+
+            queue = multiprocessing.JoinableQueue( )
+
+            for i in range( thread_count ):
+                t = Worker( c, queue, commands )
+                threads.append( t )
+                t.start( )
+
+            make_drive_upper = True if os.name == 'nt' or sys.platform == 'cygwin' else False
+
+            command = "p4 fstat ..."
+
+            if not options.quiet:
+                c.writeflush( " Checking files in depot, this may take some time for large depots..." )
+
+            proc = subprocess.Popen( command.split( ), stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=directory )
+
+            clientFile_tag = "... clientFile "
+            headAction_tag = "... headAction "
+
+            # http://www.perforce.com/perforce/r12.1/manuals/cmdref/fstat.html
+            accepted_actions = [ 'add', 'edit', 'branch', 'move/add', 'move\\add', 'integrate', 'import', 'archive' ] #currently not checked
+            rejected_actions = [ 'delete', 'move/delete', 'move\\delete', 'purge' ]
+
+            client_file = None
+
+            for line in proc.stdout:
+                line = get_str_from_process_stdout( line )
+
+                if client_file and line.startswith( headAction_tag ):
+                    action = normpath( line[ len( headAction_tag ) : ].strip( ) )
+                    if not any(action == a for a in rejected_actions):
+                        if options.verbose:
+                            c.write( " Checking " + os.path.relpath( local_path, directory ) )
+                        queue.put( ( WRK.SYNC, local_path ) )
+
+                if line.startswith( clientFile_tag ):
+                    client_file = None
+                    local_path = normpath( line[ len( clientFile_tag ) : ].strip( ) )
+                    if make_drive_upper:
+                        drive, path = splitdrive( local_path )
+                        client_file = ''.join( [ drive.upper( ), path ] )
+
+                if len(line.rstrip()) == 0:
+                    client_file = None
+
+            proc.wait( )
+
+            for line in proc.stderr:
+                if "no such file" in line:
+                    continue
+                #raise Exception(line)
+                c.write(line)#log as error
+
+            if not options.quiet:
+                c.writeflush( " Pushed work, now waiting for threads..." )
+
+            for i in range( thread_count ):
+                queue.put( ( WRK.SHUTDOWN, None ) )
+
+            for t in threads:
+                t.join( )
+
+            if not options.quiet:
+                c.write( "Done." )
+
+                end = time.clock()
+                delta = end - start
+                output = "\nFinished in " + str(delta) + "s"
+
+                c.writeflush( output )
+
+if __name__ == "__main__":
+    try:
+        main( sys.argv )
+    except:
+        print( "\nUnexpected error!" )
+        traceback.print_exc( file = sys.stdout )
\ No newline at end of file