This repository has been archived by the owner on Feb 8, 2021. It is now read-only.

[RFC] Support for running runv without 9pfs #626

Open
wants to merge 1 commit into base: master
8 changes: 7 additions & 1 deletion Makefile.am
@@ -1,4 +1,10 @@
export GO15VENDOREXPERIMENT=1
if WITH_9P
9P_BUILD_TAG=with_9p
else
9P_BUILD_TAG=
endif

if WITH_XEN
XEN_BUILD_TAG=with_xen
else
@@ -13,7 +19,7 @@ endif

COMMIT=`git describe --dirty --always --tags 2> /dev/null || true`
GOLDFLAGS="-X main.gitCommit=${COMMIT} -X main.version=${VERSION}"
HYPER_BULD_TAGS=$(XEN_BUILD_TAG) $(LIBVIRT_BUILD_TAG)
HYPER_BULD_TAGS=$(XEN_BUILD_TAG) $(LIBVIRT_BUILD_TAG) $(9P_BUILD_TAG)

all-local: build-runv
clean-local:
12 changes: 7 additions & 5 deletions cli/sandbox.go
@@ -69,11 +69,12 @@ func setupFactory(context *cli.Context, spec *specs.Spec) (factory.Factory, erro
return singlefactory.New(templatefactory.NewFromExisted(tconfig)), nil
}
bootConfig := hypervisor.BootConfig{
Kernel: kernel,
Initrd: initrd,
Bios: bios,
Cbfs: cbfs,
EnableVsock: vsock,
Kernel: kernel,
Initrd: initrd,
Bios: bios,
Cbfs: cbfs,
EnableVsock: vsock,
ContainerRootFs: spec.Root.Path,
}
return singlefactory.Dummy(bootConfig), nil
}
@@ -153,6 +154,7 @@ func destroySandbox(vm *hypervisor.Vm, lockFile *os.File) {
glog.Errorf("StopPod timeout")
}
vm.Kill()
os.RemoveAll("/tmp/" + vm.Id)

// cli refactor todo: kill the proxy if vm.Shutdown() failed.

10 changes: 10 additions & 0 deletions configure.ac
@@ -31,6 +31,12 @@ esac

# Checks for libraries.

AC_ARG_WITH([9p],
[AS_HELP_STRING([--without-9p],
[run runv with 9p])],
[with_9p=no],[with_9p=yes])


LIBVIRT_REQUIRED="1.2.2"

AC_ARG_WITH([libvirt],
@@ -76,6 +82,8 @@ fi

AM_CONDITIONAL([WITH_XEN], [test "x$with_xen" == "xyes"])

AM_CONDITIONAL([WITH_9P], [test "x$with_9p" == "xyes"])

AC_CONFIG_FILES([Makefile])

AC_OUTPUT
@@ -90,4 +98,6 @@ AC_MSG_RESULT([
with xen: ${with_xen}

with libvirt: ${with_libvirt}

with 9p: ${with_9p}
])
2 changes: 1 addition & 1 deletion hyperstart/api/json/spec.go
@@ -108,7 +108,7 @@ type Pod struct {
DnsOptions []string `json:"dnsOptions,omitempty"`
DnsSearch []string `json:"dnsSearch,omitempty"`
DeprecatedRoutes []Route `json:"routes,omitempty"`
ShareDir string `json:"shareDir"`
ShareDir string `json:"shareDir,omitempty"`
PortmappingWhiteLists *PortmappingWhiteList `json:"portmappingWhiteLists,omitempty"`
}

9 changes: 8 additions & 1 deletion hyperstart/proxy/proxy.go
@@ -10,6 +10,7 @@ import (
hyperstartgrpc "github.com/hyperhq/runv/hyperstart/api/grpc"
hyperstartjson "github.com/hyperhq/runv/hyperstart/api/json"
"github.com/hyperhq/runv/hyperstart/libhyperstart"
"github.com/hyperhq/runv/hypervisor"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/health"
@@ -108,10 +109,16 @@ func (proxy *jsonProxy) TtyWinResize(ctx context.Context, req *hyperstartgrpc.Tt
}

func (proxy *jsonProxy) StartSandbox(ctx context.Context, req *hyperstartgrpc.StartSandboxRequest) (*google_protobuf.Empty, error) {
var sharedDir string
if hypervisor.Is9pfsSupported() {
sharedDir = "share_dir"
} else {
sharedDir = ""
}
pod := &hyperstartjson.Pod{
Hostname: req.Hostname,
Dns: req.Dns,
ShareDir: "share_dir",
ShareDir: sharedDir,
}
err := proxy.json.StartSandbox(pod)
return pbEmpty(err), err
1 change: 1 addition & 0 deletions hypervisor/driver.go
@@ -22,6 +22,7 @@ type BootConfig struct {
Initrd string
Bios string
Cbfs string
ContainerRootFs string

// For network QoS (kilobytes/s)
InboundAverage string
14 changes: 11 additions & 3 deletions hypervisor/qemu/qemu_amd64.go
@@ -56,6 +56,17 @@ func (qc *QemuContext) arguments(ctx *hypervisor.VmContext) []string {
"-kernel", boot.Kernel, "-initrd", boot.Initrd, "-append", cmdline)
}

if !hypervisor.Is9pfsSupported() {
params = append(params, "-device", "virtio-scsi-pci,id=scsi0,bus=pci.0,addr=0x6")
params = append(params, "-drive", "file=/tmp/"+ctx.Id+"/rootfs.img,format=qcow2,if=none,id=drive-scsi0-1-0-0")
params = append(params, "-device", "scsi-hd,bus=scsi0.0,channel=0,scsi-id=1,lun=0,drive=drive-scsi0-1-0-0,id=scsi0-1-0-0")
params = append(params, "-device", "virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x2", "-device", "virtio-scsi-pci,id=scsi1,bus=pci.0,addr=0x3")

Review thread on this hunk:

Contributor:
Disks should be inserted via special APIs; the disk path should be specified in the container config. This will result in doing disk hotplug. (Avoiding disk hotplug is possible, but it requires some refactoring of the code.)

Contributor (author):

By disk hotplug, are you suggesting that we could attach a disk to the pod VM after it has started? If yes, could you point me to that API?

Also, the main motivation behind this PR is to make 9pfs optional. The way the code is structured right now makes all pod VMs have 9pfs.

Contributor (@laijs, Nov 14, 2017):

Yes. This is how hyperd/hyperctl/hyper.sh work with images/volumes. We have the APIs ready; it is just that runv-cli doesn't call these APIs yet, which can be changed. hyperd/hyperctl/hyper.sh doesn't necessarily depend on 9pfs either, and we are glad to make it possible to configure it out.

To hotplug the rootfs image, you can just put the image info into the RootVolume of the ContainerDescription:
https://github.com/hyperhq/runv/blob/master/api/descriptions.pb.go#L99

The code for setting RootVolume might be here:
https://github.com/hyperhq/runv/blob/master/cli/container.go#L76-L77

Member (@gnawux, Nov 14, 2017):

@harche here, when we create the container, we create it with a spec defined via protobuf, which includes a rootVolume described by a VolumeDescription:

message VolumeDescription {
    string name = 1;
    string source = 2;
    string format = 3; //"raw" (or "qcow2" later) for volume, "vfs" for dir path
    string fstype = 4; //"xfs", "ext4" etc. for block dev, or "dir" for dir path
    VolumeOption options = 8;
    bool dockerVolume = 9;
    bool readOnly = 10;
}

For block devices, you can specify Source as the path of the device, Format as raw, and Fstype as xfs, ext4, or any other filesystem the guest kernel supports.

Contributor (author):

I am trying to implement the suggested approach above, but I am getting the following error: https://pastebin.com/raw/UsfXNfdS

The change I made is here: https://pastebin.com/raw/KLMxdJ63
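
To make the thread concrete, here is a rough sketch of the approach suggested above. It assumes the generated Go types in github.com/hyperhq/runv/api mirror the VolumeDescription message quoted by @gnawux, and that ContainerDescription has a RootVolume field as @laijs notes; the helper name and call site are hypothetical and not part of this PR:

    // Hypothetical helper, not part of this PR: attach the rootfs image as the
    // container's root volume instead of sharing the rootfs over 9pfs.
    func setRootVolume(c *api.ContainerDescription, containerID string) {
        c.RootVolume = &api.VolumeDescription{
            Name:   containerID + "-rootfs",
            Source: "/tmp/" + containerID + "/rootfs.img", // image built by createRootFSDisk (hypervisor/vm.go below)
            Format: "qcow2",                               // matches the -F qcow2 flag passed to virt-make-fs
            Fstype: "ext4",                                // matches the -t ext4 flag passed to virt-make-fs
        }
    }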

} else {
params = append(params, "-device", "virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x2", "-device", "virtio-scsi-pci,id=scsi0,bus=pci.0,addr=0x3")
params = append(params, "-fsdev", fmt.Sprintf("local,id=virtio9p,path=%s,security_model=none", ctx.ShareDir))
params = append(params, "-device", fmt.Sprintf("virtio-9p-pci,fsdev=virtio9p,mount_tag=%s", hypervisor.ShareDirTag))
}

params = append(params,
"-realtime", "mlock=off", "-no-user-config", "-nodefaults", "-no-hpet",
"-rtc", "base=utc,clock=vm,driftfix=slew", "-no-reboot", "-display", "none", "-boot", "strict=on",
@@ -82,12 +93,9 @@ func (qc *QemuContext) arguments(ctx *hypervisor.VmContext) []string {
}

return append(params, "-qmp", fmt.Sprintf("unix:%s,server,nowait", qc.qmpSockName), "-serial", fmt.Sprintf("unix:%s,server,nowait", ctx.ConsoleSockName),
"-device", "virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x2", "-device", "virtio-scsi-pci,id=scsi0,bus=pci.0,addr=0x3",
"-chardev", fmt.Sprintf("socket,id=charch0,path=%s,server,nowait", ctx.HyperSockName),
"-device", "virtserialport,bus=virtio-serial0.0,nr=1,chardev=charch0,id=channel0,name=sh.hyper.channel.0",
"-chardev", fmt.Sprintf("socket,id=charch1,path=%s,server,nowait", ctx.TtySockName),
"-device", "virtserialport,bus=virtio-serial0.0,nr=2,chardev=charch1,id=channel1,name=sh.hyper.channel.1",
"-fsdev", fmt.Sprintf("local,id=virtio9p,path=%s,security_model=none", ctx.ShareDir),
"-device", fmt.Sprintf("virtio-9p-pci,fsdev=virtio9p,mount_tag=%s", hypervisor.ShareDirTag),
)
}
40 changes: 40 additions & 0 deletions hypervisor/vm.go
@@ -7,6 +7,7 @@ import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
@@ -771,6 +772,18 @@ func GetVm(vmId string, b *BootConfig, waitStarted bool) (*Vm, error) {
}
}

if !Is9pfsSupported() {
_, err := exec.LookPath("virt-make-fs")
if err != nil {
return nil, err
} else {
err = createRootFSDisk(id, b.ContainerRootFs)
if err != nil {
return nil, err
}
}
}

vm := newVm(id, b.CPU, b.Memory)
if err := vm.launch(b); err != nil {
return nil, err
@@ -788,3 +801,30 @@
vm.Log(TRACE, "GetVm succeeded")
return vm, nil
}

func createRootFSDisk(containerID string, rootfsPath string) error {

//Container RootFS spec.Root.Path
//Create RootFS raw image at runqQemuRoot + "/vm_image" + vmdata.ContainerID
cmd := exec.Command("/bin/mkdir", "-p", "/tmp/"+containerID)
err := cmd.Run()
if err != nil {
return fmt.Errorf("runq error: mkdir failed: %v", err)
}
//Create rootfs.img
cmd = exec.Command("/usr/bin/virt-make-fs", "--label=rootfs", "-F", "qcow2", "-s", "+512M", "-t", "ext4", rootfsPath,
"/tmp/"+containerID+"/rootfs.img")
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err = cmd.Run()
if err != nil {
fmt.Printf("Stderr %s", cmd.Stderr)
fmt.Printf("Stdout %s", cmd.Stdout)
return fmt.Errorf("runq error: virt-make-fs failed: %v", err)
}

return nil

}
7 changes: 7 additions & 0 deletions hypervisor/with_9pfs.go
@@ -0,0 +1,7 @@
// +build linux,amd64,with_9p

package hypervisor

func Is9pfsSupported() bool {
return true
}
7 changes: 7 additions & 0 deletions hypervisor/without_9pfs.go
@@ -0,0 +1,7 @@
// +build linux,amd64,!with_9p

package hypervisor

func Is9pfsSupported() bool {
return false
}
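
For completeness, a sketch of how the new switch would be exercised end to end, assuming the usual autotools flow that this PR extends (the flag and variable names come from the configure.ac and Makefile.am changes above; either configure invocation is followed by make):

    ./configure               # default: with_9p=yes, runv is built with the with_9p tag
    ./configure --without-9p  # with_9p=no, so Is9pfsSupported() compiles to false
    make                      # 9P_BUILD_TAG flows into the go build tags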