Fixed up the scripts and improved logging: the console now has a waking thread that flushes output on a timer.
Also fixed a bug where a console timer with too long an interval was never cleaned up; it is now cancelled appropriately at shutdown.
This commit is contained in:
parent
ea14f96d76
commit
92d217371c
p4Helper.py | 21
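The core of the change is a threading.Timer that re-arms itself after every flush and is cancelled when the console shuts down, so a long flush interval can no longer keep the process hanging at exit. Below is a minimal standalone sketch of that pattern, assuming only a flush callable and an interval in milliseconds; the class and names here are illustrative, not the repo's Console API.

import threading

class AutoFlusher:
    """Minimal sketch: periodically flush output until shut down."""

    def __init__(self, flush, interval_ms):
        self.flush = flush              # callable invoked on every wake
        self.interval_ms = interval_ms  # time between flushes, in milliseconds
        self.shutting_down = False
        self.wake_timer = None
        self._wake()                    # flush once and arm the first timer

    def _wake(self):
        self.flush()
        if not self.shutting_down:
            # threading.Timer fires only once, so each wake schedules the next one.
            self.wake_timer = threading.Timer(self.interval_ms / 1000.0, self._wake)
            self.wake_timer.daemon = True
            self.wake_timer.start()

    def shutdown(self):
        # Cancel any pending timer so a long interval cannot delay process exit.
        self.shutting_down = True
        if self.wake_timer:
            self.wake_timer.cancel()
            self.wake_timer.join()

# Usage sketch:
#     flusher = AutoFlusher(sys.stdout.flush, interval_ms=1000)
#     ...
#     flusher.shutdown()

The hunks below wire the same idea into Console.wake, Console.__init__, and Console.__exit__.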
@@ -81,11 +81,11 @@ def match_in_ignore_list( path, ignore_list ):
#==============================================================
def call_process( args ):
    return subprocess.call( args.split( ), stdout=subprocess.PIPE, stderr=subprocess.PIPE )
    return subprocess.call( args, stdout=subprocess.PIPE, stderr=subprocess.PIPE )

def try_call_process( args, path=None ):
    try:
        subprocess.check_output( args.split( ), shell=False, cwd=path, stderr=subprocess.STDOUT )
        subprocess.check_output( args, shell=False, cwd=path )#, stderr=subprocess.STDOUT )
        return 0
    except subprocess.CalledProcessError:
        return 1
@@ -113,7 +113,7 @@ def parse_info_from_command( args, value, path = None ):
def get_p4_py_results( args, path = None ):
    results = []
    proc = subprocess.Popen( [ 'p4', '-G' ] + args.split( ), stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=path )
    proc = subprocess.Popen( 'p4 -G ' + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=path )
    try:
        while True:
            output = marshal.load( proc.stdout )
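For context on the Popen change above: the -G flag makes p4 write each result record to stdout as a marshalled Python dictionary, and the usual way to consume that stream is to call marshal.load in a loop until it raises EOFError. The hunk is truncated here, so the loop below is a hedged sketch of that read pattern rather than the repo's exact get_p4_py_results; it also keeps the list form of Popen, while the new code builds a single command string.

import marshal
import subprocess

def read_p4_records(p4_args, path=None):
    """Sketch: run 'p4 -G <args>' and collect the marshalled record dicts."""
    proc = subprocess.Popen(['p4', '-G'] + p4_args.split(),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=path)
    records = []
    try:
        while True:
            # Each record is one marshalled dict; load() raises EOFError at end of stream.
            records.append(marshal.load(proc.stdout))
    except EOFError:
        pass
    finally:
        proc.stdout.close()
        proc.wait()
    return records

# Example (assumes a configured Perforce client):
#     for record in read_p4_records('opened'):
#         print(record)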
@@ -286,6 +286,14 @@ class PDict( dict ):
class Console( threading.Thread ):
    MSG = enum('WRITE', 'FLUSH', 'SHUTDOWN', 'CLEAR' )

    @staticmethod
    def wake(thread):
        thread.flush()
        if not thread.shutting_down:
            thread.wake_thread = threading.Timer(thread.auto_flush_time / 1000.0, Console.wake, [thread])
            thread.wake_thread.daemon = True
            thread.wake_thread.start()

    # auto_flush_time is time in milliseconds since last flush to trigger a flush when writing
    def __init__( self, auto_flush_num = None, auto_flush_time = None ):
        threading.Thread.__init__( self )
@@ -296,6 +304,9 @@ class Console( threading.Thread ):
        self.auto_flush_num = auto_flush_num if auto_flush_num is not None else -1
        self.auto_flush_time = auto_flush_time * 1000 if auto_flush_time is not None else -1
        self.shutting_down = False
        self.wake_thread = None
        if self.auto_flush_time > 0:
            Console.wake(self)

    def write( self, data, pid = None ):
        pid = pid if pid is not None else threading.current_thread().ident
@@ -319,6 +330,10 @@ class Console( threading.Thread ):
        return self

    def __exit__( self, type, value, tb ):
        self.shutting_down = True
        if self.wake_thread:
            self.wake_thread.cancel()
            self.wake_thread.join()
        self.queue.put( ( Console.MSG.SHUTDOWN, ) )
        self.queue.join( )
@@ -35,7 +35,7 @@ def main( args ):
    directory = normpath( options.directory if options.directory is not None else os.getcwd( ) )

    with Console( auto_flush_num=20, auto_flush_time=1000 ) as c:
    with Console( auto_flush_time=1 ) as c:
        with P4Workspace( directory ):
            # Files are added from .p4ignore
            # Key is the file root, the value is the table of file regexes for that directory.
@@ -32,10 +32,10 @@ class P4SyncMissing:
        directory = normpath( options.directory if options.directory is not None else os.getcwd( ) )

        with Console( auto_flush_time=1000 ) as c:
        with Console( auto_flush_time=1 ) as c:
            with P4Workspace( directory ):
                if not options.quiet:
                    c.writeflush( "Preparing to sync missing files..." )
                    c.writeflush( "Retreiving missing files..." )
                    c.writeflush( " Setting up threads..." )

                # Setup threading
@@ -44,18 +44,29 @@ class P4SyncMissing:
                def shutdown( data ):
                    return False
                def sync( files ):
                    files_len = len(files)
                    files_flat = ' '.join('"' + f + '"' for f in files)

                    if options.verbose:
                        files_len = len(files)
                        if files_len > 1:
                            c.write( " Syncing batch of " + str(len(files)) + " ...")
                            for f in files:
                                c.write( " " + os.path.relpath( f, directory ) )
                        else:
                            for f in files:
                                c.write( " Syncing " + os.path.relpath( f, directory ) + " ..." )

                    ret = -1
                    count = 0
                    while ret != 0 and count < 2:
                        ret = try_call_process( "p4 sync -f " + files_flat )
                        count += 1
                        #if ret != 0 and not options.quiet:
                        # c.write("Failed, trying again to sync " + files_flat)
                        if ret != 0 and not options.quiet:
                            c.write("Failed, trying again to sync " + files_flat)
                    if ret != 0:
                        pass
                        #if not options.quiet:
                        # c.write("Failed to sync " + files_flat)
                        if not options.quiet:
                            c.write("Failed to sync " + files_flat)
                    else:
                        if not options.quiet:
                            files_len = len(files)
@@ -74,13 +85,17 @@ class P4SyncMissing:
                thread_count = options.thread_count if options.thread_count > 0 else multiprocessing.cpu_count( ) + threads

                count = 0
                total = 0
                self.queue = multiprocessing.JoinableQueue( )

                for i in range( thread_count ):
                    t = Worker( c, self.queue, commands )
                    t.daemon = True
                    threads.append( t )
                    t.start( )

                c.writeflush( " Done." )

                make_drive_upper = True if os.name == 'nt' or sys.platform == 'cygwin' else False

                command = "p4 fstat ..."
@@ -105,6 +120,7 @@ class P4SyncMissing:
                file_type = None
                file_type_last = None

                # todo: use fewer threads, increase bucket size and use p4 threading
                class Bucket:
                    def __init__(self, limit):
                        self.queue = []
@@ -118,7 +134,7 @@ class P4SyncMissing:
                self.buckets = {}
                self.buckets[file_type_text] = Bucket(10)
                self.buckets[file_type_binary] = Bucket(1)
                self.buckets[file_type_binary] = Bucket(2)

                def push_queued(bucket):
                    if bucket.queue_size == 0:
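The hunk above changes the binary-file bucket from batches of 1 to batches of 2, while text files are batched 10 at a time. Only a slice of the Bucket class is visible in these hunks, so the following is an assumed sketch of how such a size-limited batching bucket typically works, not the repo's actual implementation: collect file paths until the limit is hit, then hand the whole batch to the worker queue.

class BatchBucket:
    """Hypothetical sketch of a size-limited batch bucket (details assumed)."""

    def __init__(self, limit):
        self.limit = limit   # maximum number of files per sync batch
        self.queue = []      # files waiting to be flushed as one batch

    def push(self, path, flush):
        self.queue.append(path)
        if len(self.queue) >= self.limit:
            flush(self.queue)    # e.g. enqueue one sync job for the whole batch
            self.queue = []

    def drain(self, flush):
        # Flush whatever is left once the fstat output has been fully parsed.
        if self.queue:
            flush(self.queue)
            self.queue = []

# Usage sketch mirroring the bucket sizes above:
#     buckets = { 'text': BatchBucket(10), 'binary': BatchBucket(2) }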
@@ -153,11 +169,12 @@ class P4SyncMissing:
                        if any(file_action == a for a in rejected_actions):
                            file_action = None
                        else:
                            total += 1
                            if os.path.exists( client_file ):
                                file_action = None

                    elif line.startswith( clientFile_tag ):
                        client_file = normpath( line[ len( clientFile_tag ) : ].strip( ) )
                        client_file = line[ len( clientFile_tag ) : ].strip( )
                        if make_drive_upper:
                            drive, path = splitdrive( client_file )
                            client_file = ''.join( [ drive.upper( ), path ] )
@@ -176,7 +193,8 @@ class P4SyncMissing:
                        c.write(line)#log as error

                if not options.quiet:
                    c.writeflush( " Checking " + str(count) + " file(s), now waiting for threads..." )
                    c.writeflush( " Done. Checked " + str(total) + " file(s)." )
                    c.writeflush( " Queued " + str(count) + " file(s), now waiting for threads..." )

                for i in range( thread_count ):
                    self.queue.put( ( WRK.SHUTDOWN, None ) )
@@ -187,6 +205,9 @@ class P4SyncMissing:
                    if not options.quiet:
                        print( " Done." )

                if not options.quiet:
                    print( "Done." )

                end = time.clock()
                delta = end - start
                output = "\nFinished in " + str(delta) + "s"