fixing splitting naming convention, documentation improvements

Chris Putnam, 2020-03-21 16:38:48 -05:00
commit 1e6cacd49f (parent efff615b97)

@@ -105,9 +105,23 @@ def cuestamp_to_sectors(stamp):
   fields = int(m.group(3))
   return fields + (seconds * 75) + (minutes * 60 * 75)
 
+# Generates track filename based on redump naming convention
+# (Note: prefix may contain a fully qualified path)
+def track_filename(prefix, track_num, track_count):
+  # Redump is strangely inconsistent in their datfiles and cuesheets when it
+  # comes to track numbers. The naming convention currently seems to be:
+  # If there are less than 10 tracks: "Track 1", "Track 2", etc.
+  # If there are more than 10 tracks: "Track 01", "Track 02", etc.
+  #
+  # It'd be nice if it were consistently %02d!
+  #
+  if track_count > 9:
+    return "%s (Track %02d).bin" % (prefix, track_num)
+  return "%s (Track %d).bin" % (prefix, track_num)
 
-def gen_merged_cuesheet(bin_filename, files):
-  cuesheet = 'FILE "%s" BINARY\n' % bin_filename
+# Generates a 'merged' cuesheet, that is, one bin file with tracks indexed within.
+def gen_merged_cuesheet(basename, files):
+  cuesheet = 'FILE "%s.bin" BINARY\n' % basename
   # One sector is (BLOCKSIZE) bytes
   sector_pos = 0
   for f in files:
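
For readers skimming the diff: with the helper above, an image with fewer than ten tracks keeps unpadded track numbers, while a larger one gets zero-padding. A minimal illustration, assuming track_filename from this patch is in scope (the prefix below is hypothetical):

# illustration only -- calls the track_filename helper introduced above
prefix = "out/Example Game"            # hypothetical output prefix
print(track_filename(prefix, 2, 3))    # -> out/Example Game (Track 2).bin
print(track_filename(prefix, 2, 12))   # -> out/Example Game (Track 02).bin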
@@ -118,19 +132,21 @@ def gen_merged_cuesheet(bin_filename, files):
     sector_pos += f.size / Track.globalBlocksize
   return cuesheet
 
-def gen_split_cuesheet(bin_filename, merged_file):
-  # similar to merged, could have it do both, but separate arguably cleaner
+# Generates a 'split' cuesheet, that is, with one bin file for every track.
+def gen_split_cuesheet(basename, merged_file):
   cuesheet = ""
   for t in merged_file.tracks:
-    cuesheet += 'FILE "%s (Track %02d).bin" BINARY\n' % (bin_filename, t.num)
+    track_fn = track_filename(basename, t.num, len(merged_file.tracks))
+    cuesheet += 'FILE "%s" BINARY\n' % track_fn
     cuesheet += '  TRACK %02d %s\n' % (t.num, t.track_type)
     for i in t.indexes:
       sector_pos = i['file_offset'] - t.indexes[0]['file_offset']
       cuesheet += '    INDEX %02d %s\n' % (i['id'], sectors_to_cuestamp(sector_pos))
   return cuesheet
 
+# Merges files together to new file `merged_filename`, in listed order.
 def merge_files(merged_filename, files):
-  # cat is actually faster, but I prefer multi-platform and no special-casing
+  # cat is actually a bit faster, but this is multi-platform and no special-casing
   chunksize = 1024 * 1024
   with open(merged_filename, 'wb') as outfile:
     for f in files:
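
To make the output concrete: the split generator emits one FILE/TRACK/INDEX block per track, with each track's INDEX timestamps rebased to the start of its own file. For a hypothetical two-track disc it would look roughly like this (title, track types and pregap length are illustrative, not taken from a real dump):

FILE "Example Game (Track 1).bin" BINARY
  TRACK 01 MODE2/2352
    INDEX 01 00:00:00
FILE "Example Game (Track 2).bin" BINARY
  TRACK 02 AUDIO
    INDEX 00 00:00:00
    INDEX 01 00:02:00

The merged generator instead writes a single FILE "<basename>.bin" BINARY line up front and keeps offsets cumulative across the whole image.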
@@ -142,12 +158,12 @@ def merge_files(merged_filename, files):
           outfile.write(chunk)
   return True
 
+# Writes each track in a File to a new file
 def split_files(new_basename, merged_file):
   # use calculated sectors, read the same amount, start new file when equal
   with open(merged_file.filename, 'rb') as infile:
     for t in merged_file.tracks:
       chunksize = 1024 * 1024
-      out_name = '%s (Track %02d).bin' % (new_basename, t.num)
+      out_name = track_filename(new_basename, t.num, len(merged_file.tracks))
       tracksize = t.sectors * Track.globalBlocksize
       written = 0
       with open(out_name, 'wb') as outfile:
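
The rest of split_files (outside this hunk) copies exactly tracksize bytes from the merged bin into each per-track file, chunksize bytes at a time. A minimal sketch of that bounded copy, with hypothetical names and not the verbatim binmerge code:

# sketch: copy exactly total_bytes from infile to outfile in bounded chunks
def copy_exact(infile, outfile, total_bytes, chunksize=1024 * 1024):
  written = 0
  while written < total_bytes:
    chunk = infile.read(min(chunksize, total_bytes - written))
    if not chunk:
      raise IOError("source ended before %d bytes were copied" % total_bytes)
    outfile.write(chunk)
    written += len(chunk)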
@@ -164,17 +180,17 @@ def split_files(new_basename, merged_file):
 def main():
   parser = argparse.ArgumentParser(description="Using a cuesheet, merges numerous bin files into a single bin file and produces a new cuesheet with corrected offsets. Works great with Redump. Supports all block modes, but only binary track types. Should work on any python3 platform.")
   parser.add_argument('cuefile', help='path to source cuefile with multiple referenced bin tracks')
-  parser.add_argument('new_name', help='name (without extension) for your new bin/cue files')
-  parser.add_argument('--split', help='Change mode from merging to splitting to allow reconstruction of the split format.', required=False, action="store_true")
+  parser.add_argument('basename', help='name (without extension) for your new bin/cue files')
+  parser.add_argument('--split', help='reverses operation, splitting merged files back to individual tracks', required=False, action="store_true")
   parser.add_argument('-o', dest='outdir', required=False, default=False, help='output directory. defaults to the same directory as source cue')
   args = parser.parse_args()
 
   cue_map = read_cue_file(args.cuefile)
 
   if args.split:
-    cuesheet = gen_split_cuesheet(args.new_name, cue_map[0])
+    cuesheet = gen_split_cuesheet(args.basename, cue_map[0])
   else:
-    cuesheet = gen_merged_cuesheet(args.new_name+'.bin', cue_map)
+    cuesheet = gen_merged_cuesheet(args.basename, cue_map)
 
   outdir = os.path.dirname(args.cuefile)
   if args.outdir:
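
Putting the renamed arguments together, invocation stays positional: the source cuefile first, then the output basename, with --split and -o as options. Hypothetical examples (file names are made up):

# merge a multi-track Redump dump into a single bin/cue pair
python3 binmerge.py "Example Game.cue" "Example Game"

# reverse it: split a previously merged image back into one bin per track, writing to ./out
python3 binmerge.py --split -o out "Example Game (merged).cue" "Example Game"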
@@ -184,20 +200,20 @@ def main():
       print("Output dir does not exist")
       return False
 
-  with open(os.path.join(outdir, args.new_name+'.cue'), 'w', newline='\r\n') as f:
+  with open(os.path.join(outdir, args.basename+'.cue'), 'w', newline='\r\n') as f:
     f.write(cuesheet)
-  print("Wrote %s" % args.new_name+'.cue')
+  print("Wrote %s" % args.basename+'.cue')
 
   if args.split:
     print("Splitting files...")
-    if split_files(os.path.join(outdir, args.new_name), cue_map[0]):
+    if split_files(os.path.join(outdir, args.basename), cue_map[0]):
       print("Wrote %d bin files" % len(cue_map[0].tracks))
     else:
       print("Unable to split bin files")
   else:
     print("Merging files...")
-    if merge_files(os.path.join(outdir, args.new_name+'.bin'), cue_map):
-      print("Wrote %s" % args.new_name+'.bin')
+    if merge_files(os.path.join(outdir, args.basename+'.bin'), cue_map):
+      print("Wrote %s" % args.basename+'.bin')
     else:
       print("Unable to merge bin files")