fat16copy: Fix allocation logic when extending directories.

When extending a directory, empty directory entries were left behind,
signaling the end of the directory prematurely.

Bug: 31570546
Test: make dist; mdir shows all the files.

(cherry picked from commit d4516300b010c8155861e310dac871e8c10d3382)

Change-Id: Ie3d82cfaad51be73911293a54d18746e8adf62a2
This commit is contained in:
@@ -222,11 +222,8 @@ class fat_dir(object):
|
|||||||
data.seek(0, os.SEEK_END)
|
data.seek(0, os.SEEK_END)
|
||||||
size = data.tell()
|
size = data.tell()
|
||||||
|
|
||||||
chunk = 0
|
# Empty files shouldn't have any clusters assigned.
|
||||||
|
chunk = self.backing.fs.allocate(size) if size > 0 else 0
|
||||||
if size > 0:
|
|
||||||
chunk = self.backing.fs.allocate(size)
|
|
||||||
|
|
||||||
(shortname, ext) = self.make_short_name(name)
|
(shortname, ext) = self.make_short_name(name)
|
||||||
self.add_dentry(0, shortname, ext, name, chunk, size)
|
self.add_dentry(0, shortname, ext, name, chunk, size)
|
||||||
|
|
||||||
@@ -393,28 +390,23 @@ class dentry(object):
|
|||||||
record_count = len(longname_record_data) + 1
|
record_count = len(longname_record_data) + 1
|
||||||
|
|
||||||
found_count = 0
|
found_count = 0
|
||||||
|
while found_count < record_count:
|
||||||
while True:
|
|
||||||
record = f.read(32)
|
record = f.read(32)
|
||||||
|
|
||||||
if record is None or len(record) != 32:
|
if record is None or len(record) != 32:
|
||||||
break
|
# We reached the EOF, so we need to extend the file with a new cluster.
|
||||||
|
f.write("\0" * self.fs.bytes_per_cluster)
|
||||||
|
f.seek(-self.fs.bytes_per_cluster, os.SEEK_CUR)
|
||||||
|
record = f.read(32)
|
||||||
|
|
||||||
marker = struct.unpack("B", record[0])[0]
|
marker = struct.unpack("B", record[0])[0]
|
||||||
|
|
||||||
if marker == DEL_MARKER or marker == 0:
|
if marker == DEL_MARKER or marker == 0:
|
||||||
found_count += 1
|
found_count += 1
|
||||||
|
|
||||||
if found_count == record_count:
|
|
||||||
break
|
|
||||||
else:
|
else:
|
||||||
found_count = 0
|
found_count = 0
|
||||||
|
|
||||||
if found_count != record_count:
|
f.seek(-(record_count * 32), os.SEEK_CUR)
|
||||||
f.write("\0" * self.fs.bytes_per_cluster)
|
|
||||||
f.seek(-self.fs.bytes_per_cluster, os.SEEK_CUR)
|
|
||||||
else:
|
|
||||||
f.seek(-(record_count * 32), os.SEEK_CUR)
|
|
||||||
f.write(entry)
|
f.write(entry)
|
||||||
|
|
||||||
class root_dentry_file(fake_file):
|
class root_dentry_file(fake_file):
|
||||||
@@ -637,7 +629,6 @@ class fat(object):
|
|||||||
Allocate a new cluster chain big enough to hold at least the given amount
|
Allocate a new cluster chain big enough to hold at least the given amount
|
||||||
of bytes.
|
of bytes.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
assert amount > 0, "Must allocate a non-zero amount."
|
assert amount > 0, "Must allocate a non-zero amount."
|
||||||
|
|
||||||
f = self.f
|
f = self.f
|
||||||
@@ -702,39 +693,17 @@ class fat(object):
|
|||||||
Given a cluster which is the *last* cluster in a chain, extend it to hold
|
Given a cluster which is the *last* cluster in a chain, extend it to hold
|
||||||
at least `amount` more bytes.
|
at least `amount` more bytes.
|
||||||
"""
|
"""
|
||||||
return_cluster = None
|
if amount == 0:
|
||||||
|
return
|
||||||
f = self.f
|
f = self.f
|
||||||
|
entry_offset = FAT_TABLE_START + cluster * 2
|
||||||
position = FAT_TABLE_START + cluster * 2
|
f.seek(entry_offset)
|
||||||
f.seek(position)
|
|
||||||
|
|
||||||
assert read_le_short(f) == 0xFFFF, "Extending from middle of chain"
|
assert read_le_short(f) == 0xFFFF, "Extending from middle of chain"
|
||||||
rewind_short(f)
|
|
||||||
|
|
||||||
while position + 2 < FAT_TABLE_START + self.fat_size and amount > 0:
|
return_cluster = self.allocate(amount)
|
||||||
skip_short(f)
|
f.seek(entry_offset)
|
||||||
got = read_le_short(f)
|
self.write_cluster_entry(return_cluster)
|
||||||
rewind_short(f)
|
return return_cluster
|
||||||
rewind_short(f)
|
|
||||||
|
|
||||||
if got != 0:
|
|
||||||
break
|
|
||||||
|
|
||||||
cluster += 1
|
|
||||||
return_cluster = return_cluster or cluster
|
|
||||||
position += 2
|
|
||||||
self.write_cluster_entry(cluster)
|
|
||||||
amount -= self.bytes_per_cluster
|
|
||||||
|
|
||||||
if amount <= 0:
|
|
||||||
self.write_cluster_entry(0xFFFF)
|
|
||||||
return return_cluster
|
|
||||||
|
|
||||||
new_chunk = self.allocate(amount)
|
|
||||||
f.seek(FAT_TABLE_START + cluster * 2)
|
|
||||||
self.write_cluster_entry(new_chunk)
|
|
||||||
|
|
||||||
return return_cluster or new_chunk
|
|
||||||
|
|
||||||
def write_file(self, head_cluster, start_byte, data):
|
def write_file(self, head_cluster, start_byte, data):
|
||||||
"""
|
"""
|
||||||
@@ -745,35 +714,31 @@ class fat(object):
|
|||||||
data: The data to write.
|
data: The data to write.
|
||||||
"""
|
"""
|
||||||
f = self.f
|
f = self.f
|
||||||
|
last_offset = start_byte + len(data)
|
||||||
|
current_offset = 0
|
||||||
|
current_cluster = head_cluster
|
||||||
|
|
||||||
while True:
|
while current_offset < last_offset:
|
||||||
if start_byte < self.bytes_per_cluster:
|
# Write everything that falls in the cluster starting at current_offset.
|
||||||
to_write = data[:self.bytes_per_cluster - start_byte]
|
data_begin = max(0, current_offset - start_byte)
|
||||||
data = data[self.bytes_per_cluster - start_byte:]
|
data_end = min(len(data),
|
||||||
|
current_offset + self.bytes_per_cluster - start_byte)
|
||||||
|
if data_end > data_begin:
|
||||||
|
cluster_file_offset = (self.data_start() + self.root_entries * 32 +
|
||||||
|
(current_cluster - 2) * self.bytes_per_cluster)
|
||||||
|
f.seek(cluster_file_offset + max(0, start_byte - current_offset))
|
||||||
|
f.write(data[data_begin:data_end])
|
||||||
|
|
||||||
cluster_bytes_from_root = (head_cluster - 2) * \
|
# Advance to the next cluster in the chain or get a new cluster if needed.
|
||||||
self.bytes_per_cluster
|
current_offset += self.bytes_per_cluster
|
||||||
bytes_from_root = cluster_bytes_from_root + start_byte
|
if last_offset > current_offset:
|
||||||
bytes_from_data_start = bytes_from_root + self.root_entries * 32
|
f.seek(FAT_TABLE_START + current_cluster * 2)
|
||||||
|
next_cluster = read_le_short(f)
|
||||||
|
if next_cluster > MAX_CLUSTER_ID:
|
||||||
|
next_cluster = self.extend_cluster(current_cluster, len(data))
|
||||||
|
current_cluster = next_cluster
|
||||||
|
assert current_cluster > 0, "Cannot write free cluster"
|
||||||
|
|
||||||
f.seek(self.data_start() + bytes_from_data_start)
|
|
||||||
f.write(to_write)
|
|
||||||
|
|
||||||
if len(data) == 0:
|
|
||||||
return
|
|
||||||
|
|
||||||
start_byte -= self.bytes_per_cluster
|
|
||||||
|
|
||||||
if start_byte < 0:
|
|
||||||
start_byte = 0
|
|
||||||
|
|
||||||
f.seek(FAT_TABLE_START + head_cluster * 2)
|
|
||||||
next_cluster = read_le_short(f)
|
|
||||||
if next_cluster > MAX_CLUSTER_ID:
|
|
||||||
head_cluster = self.extend_cluster(head_cluster, len(data))
|
|
||||||
else:
|
|
||||||
head_cluster = next_cluster
|
|
||||||
assert head_cluster > 0, "Cannot write free cluster"
|
|
||||||
|
|
||||||
def add_item(directory, item):
|
def add_item(directory, item):
|
||||||
"""
|
"""
|
||||||
|
Reference in New Issue
Block a user