Fix volume unmount logic to clean up PVE's shadow mount
commit 9eddb96402
parent e164263106
@@ -118,33 +118,22 @@ export class Provisioner {
         return `p5x-node-${(Math.random() + 1).toString(36).substring(9)}${nextNodeNum}`
     }
 
-    public async mountVolume(volume: Volume, mountpoint?: string, idOffset: number = 0): Promise<Volume> {
+    public async mountVolume(volume: Volume, mountpoint?: string): Promise<Volume> {
         mountpoint = mountpoint || volume.getDefaultMountpoint()
         // TODO Lock the container's config
 
         const node = await volume.getNode()
 
         const ctConfig = await node.getConfigLines()
-        const nextMountpoint = ctConfig.nextNthValue('mp') + idOffset
+        const nextMountpoint = ctConfig.nextNthValue('mp')
 
         // FIXME: unlock container config
 
         const api = await this.getApi()
         const line = `${await volume.getQualifiedName()},mp=${mountpoint},backup=1`
-        try {
-            await api.nodes.$(node.unqualifiedPVEHost())
-                .lxc.$(node.pveId)
-                .config.$put({ [`mp${nextMountpoint}`]: line })
-        } catch (e) {
-            if ( idOffset > 4 ) {
-                throw e
-            }
-
-            this.logging.error('Error mounting volume! Will retry with a higher mountpointIdentifier. Original error:')
-            this.logging.error(e)
-            await node.updateConfig(lines => lines.removePending(line))
-            return this.mountVolume(volume, mountpoint, idOffset + 1)
-        }
+        await api.nodes.$(node.unqualifiedPVEHost())
+            .lxc.$(node.pveId)
+            .config.$put({ [`mp${nextMountpoint}`]: line })
 
         volume.mountpointIdentifier = `mp${nextMountpoint}`
         volume.mountpoint = mountpoint
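For context, the line value built above follows Proxmox's LXC mount point syntax, <storage>:<volume>,mp=<path>,backup=1, keyed by mpN in the container config. A minimal TypeScript sketch of the payload shape, with the storage name, volume name, path, and index invented purely for illustration:

// Hypothetical values, for illustration only
const nextMountpoint = 3
const line = 'local-lvm:vm-101-disk-2,mp=/mnt/p5x/volumes/example,backup=1'

// PVE keys each mount point entry by its index, so the config PUT body amounts to:
//   { "mp3": "local-lvm:vm-101-disk-2,mp=/mnt/p5x/volumes/example,backup=1" }
const payload = { [`mp${nextMountpoint}`]: line }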
@@ -166,10 +155,29 @@ export class Provisioner {
         await nodeHost.run(shellCommand(`umount "${volume.mountpoint}"`))
         volume.mountpoint = undefined
 
         // TODO Lock the container's config
+        // Okay, here's some fucky-wucky shit:
+        // To avoid security vulnerabilities where hosts can umount their disks and break the
+        // firewall between CT and host, when a disk is mounted to a CT, Proxmox opens a clone
+        // of the mount in a special directory in the mount namespace of the container process'
+        // parent (i.e. the "monitor" process).
+        // If we umount the disk from the container, but not the monitor process, we won't be
+        // able to reattach any disks to the same mpX path (mp0, mp1, mp2, &c.)
+        // So to get around this, we (1) look up the container process, (2) figure out the
+        // monitor process, then (3) umount the clone.
+        // This was *such* a pain in the ass to figure out, but it's a testament to open-
+        // source that I was able to do it at all.
+
+        const pveHost = await this.getPVEHost(node.pveHost.split('/')[1])
+        const ctPIDResult = await pveHost.runLineResult(shellCommand(`lxc-info -n ${node.pveId} -p`))
+        const ctPID = String(ctPIDResult).split('PID:')[1]?.trim()
+        if ( !ctPID ) {
+            throw new Error(`Could not cleanly unmount volume ${volume.volumeId}: could not find container PID`)
+        }
+
+        const parentPID = await pveHost.runLineResult(shellCommand(`ps -o ppid= -p ${ctPID}`))
+        await pveHost.run(shellCommand(`nsenter --target ${parentPID} --mount /bin/bash -c 'umount /var/lib/lxc/.pve-staged-mounts/${volume.mountpointIdentifier}'`))
 
         // Replace the disk's mountpoint with an unused disk
-        const pveHost = await this.getPVEHost(node.pveHost.split('/')[1])
         const pveFilesystem = await pveHost.getFilesystem()
         const ctConfig = pveFilesystem.getPath(`/etc/pve/lxc/${node.pveId}.conf`)
         const ctConfigLines = await ctConfig.read()
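The comment block above describes the shadow-mount cleanup. As a standalone illustration of the same three steps (not the Provisioner code itself), here is a minimal sketch using Node's child_process, assuming it runs directly on the PVE host and that ctId and mpId are supplied by the caller:

import { execSync } from 'child_process'

// Run a shell command on the PVE host and return trimmed stdout.
const sh = (cmd: string): string => execSync(cmd).toString().trim()

// Hypothetical helper, for illustration only: unmount the shadow ("staged") mount
// that PVE keeps in the lxc monitor process' mount namespace.
function unmountStagedMount(ctId: number, mpId: string): void {
    // (1) Find the container's init PID via lxc-info (output looks like "PID: 12345").
    const ctPid = sh(`lxc-info -n ${ctId} -p`).split('PID:')[1]?.trim()
    if (!ctPid) throw new Error(`Could not find PID for container ${ctId}`)

    // (2) Its parent is the lxc monitor process, which holds the cloned mount.
    const monitorPid = sh(`ps -o ppid= -p ${ctPid}`)

    // (3) Unmount the clone inside the monitor's mount namespace.
    sh(`nsenter --target ${monitorPid} --mount -- umount /var/lib/lxc/.pve-staged-mounts/${mpId}`)
}

// Example usage: unmountStagedMount(101, 'mp3')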
@@ -193,7 +201,7 @@ export class Provisioner {
 
         volume.mountpointIdentifier = `unused${maxUnused+1}`
 
-        // Update the container's config and FIXME: unlock it
+        // Update the container's config
         await ctConfig.write(newConfigLines.join('\n'))
 
         volume.save()
@@ -509,14 +517,14 @@ export class Provisioner {
         return node
     }
 
-    public async createVolume(name: string, sizeInBytes: number, idOffset: number = 0): Promise<Volume> {
+    public async createVolume(name: string, sizeInBytes: number): Promise<Volume> {
         this.logging.info(`Creating volume ${name} with size ${sizeInBytes / 1024}KiB...`)
 
         const masterNode = await Node.getMaster()
         const api = await this.getApi()
 
         let ctConfig = await masterNode.getConfigLines()
-        const nextMountpoint = ctConfig.nextNthValue('mp') + idOffset
+        const nextMountpoint = ctConfig.nextNthValue('mp')
 
         // FIXME: unlock container config
 
@@ -530,20 +538,9 @@ export class Provisioner {
         const provisionSizeInGiB = Math.max(Math.ceil(sizeInBytes/(1024*1024*1024)), 1)
         const storage = await Setting.loadOneRequired('pveStoragePool')
         const line = `${storage}:${provisionSizeInGiB},mp=${vol.getDefaultMountpoint()},backup=1`
-        try {
-            await api.nodes.$(masterNode.unqualifiedPVEHost())
-                .lxc.$(masterNode.pveId)
-                .config.$put({ [`mp${nextMountpoint}`]: line })
-        } catch (e) {
-            if ( idOffset > 4 ) {
-                throw e
-            }
-
-            this.logging.error('Encountered error while creating volume. Will retry. Original error:')
-            this.logging.error(e)
-            await masterNode.updateConfig(lines => lines.removePending(line))
-            return this.createVolume(name, sizeInBytes, idOffset + 1)
-        }
+        await api.nodes.$(masterNode.unqualifiedPVEHost())
+            .lxc.$(masterNode.pveId)
+            .config.$put({ [`mp${nextMountpoint}`]: line })
 
         ctConfig = await masterNode.getConfigLines()
         const mount = ctConfig.getForKey(`mp${nextMountpoint}`)