That’s a kink I’ve yet to figure out. It has to create the partitions before it tries to restore them, and I don’t know how that step is done, as the scripts used for the deploy process are:
[code] elif [ "$imgType" = "mps" ]; then
# Restore multipartition image for a single drive
echo -n " * Looking for Hard Disks...";
#disk=`fogpartinfo --list-devices 2>/dev/null | cut -d' ' -f 1`
getHardDisk;
echo "Done";
echo " * Using Hard Disk: $hd";
if [ -n "$hd" ]
then
# check image
if [ -d "$imagePath" ]
then
tmpMBR="${imagePath}/d1.mbr";
if [ -f "$tmpMBR" ]
then
echo -n " * Restoring MBR...............................";
dd if=$tmpMBR of=$hd &>/dev/null
if [ "$osid" == "50" ]
then
fdisk $hd &>/dev/null << EOFLINUXFIX
w
EOFLINUXFIX
fi
echo "Done";
echo -n " * Checking hard disks.........................";
runPartprobe;
echo "Done";
parts=`fogpartinfo --list-parts $hd 2>/dev/null`
diskLength=`expr length $hd`;
for part in $parts
do
partNum=${part:$diskLength};
echo " * Processing Partition: $part (${partNum})";
sleep 1;
imgpart="${imagePath}/d1p${partNum}.img";
if [ "$osid" == "50" ]; then
echo " * Preparing linux filesystem...";
parted -s $hd -a opt &>/dev/null << EOFPTD
mkfs
yes
$partNum
fat32
q
EOFPTD
runPartprobe;
sleep 10;
fi
if [ -f "$imgpart" ]; then
if [ "$mc" = "yes" ]; then
if [ "$partNum" == "2" ] && [ "$osid" == "6" ]; then
udp-receiver --nokbd --portbase ${port} --mcast-rdv-address ${storageip} 2>/dev/null | partclone.restore -O $part -N -f 1 2>/tmp/status.fog
elif [ "$partNum" == "3" ] && [ "$osid" == "6" ]; then
udp-receiver --nokbd --portbase ${port} --mcast-rdv-address ${storageip} 2>/dev/null | partclone.restore -O $part -N -f 1 2>/tmp/status.fog
else
udp-receiver --nokbd --portbase ${port} --mcast-rdv-address ${storageip} 2>/dev/null | partclone.restore -O $part -N -f 1 2>/tmp/status.fog
fi
else
export FOGSTATS="1";
if [ "$partNum" == "2" ] && [ "$osid" == "6" ]; then
pigz -d -c < $imgpart | partclone.restore -O $part -N -f 1 2>/tmp/status.fog
elif [ "$partNum" == "3" ] && [ "$osid" == "6" ]; then
pigz -d -c < $imgpart | partclone.restore -O $part -N -f 1 2>/tmp/status.fog
else
pigz -d -c < $imgpart | partclone.restore -O $part -N -f 1 2>/tmp/status.fog
fi
fi
else
echo " * Partition File missing: $imgpart";
sleep 9;
fi
sleep 1;
clearScreen;
echo " * Image Restored";
done
if [ "$osid" == "50" ]
then
echo " * Restore Linux swap paritions:“;
swaps=LANG=C fdisk -l | grep swap | cut -d' ' -f1 2>/dev/null
;
IFS=’
';
for x in $swaps
do
echo " * Restoring partition: $x”;
mkswap $x &>/dev/null
done
fi
echo "";
echo " * Task complete!";
echo "";
else
handleError "Image Store Corrupt: Unable to locate MBR.";
fi
else
handleError "Unable to locate image store.";
fi
else
handleError "Fatal Error: Disk device not found!";
fi[/code]
You’ll notice it doesn’t create the partitions itself; in this branch it writes the system’s backed-up MBR onto the disk, and because the partition table lives inside that first sector, the partprobe that follows is what makes the partitions reappear.
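For reference, the matching capture on the upload side would presumably be a plain dd of the first sector, something along these lines (a sketch only, not the actual upload script; the device and path variables are borrowed from the deploy script above):

[code]# hypothetical upload-side capture: 512 bytes = boot code + partition table + signature
dd if=$hd of=${imagePath}/d1.mbr bs=512 count=1 &>/dev/null[/code]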
My guess is that something is wrong with the dd command that creates the backup MBR during the upload process.
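If that’s the suspicion, the saved d1.mbr on the image store can be sanity-checked before the next deploy. A quick sketch, assuming the image directory layout shown above (the path is a placeholder):

[code]ls -l "$imagePath/d1.mbr"      # if captured with a single-sector dd, this should be exactly 512 bytes
file "$imagePath/d1.mbr"       # should report a DOS/MBR boot sector
fdisk -l "$imagePath/d1.mbr"   # should list the same partition layout as the source disk[/code]

If the file is the wrong size, or fdisk can’t read a partition table out of it, the upload step is where to look.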