oc new-app -f ../docker-compose.yml
oc import docker-compose -f ../docker-compose.yml
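Both entry points consume an ordinary docker-compose.yml. For reference, a trimmed sketch of the kind of file exercised here (service names and images are illustrative, abbreviated from the test fixture added at the bottom of this diff):

    web:
      build: ./app
      ports:
        - "3030:3000"
      links:
        - "db:redis"

    db:
      image: redis
      ports:
        - "6379:6379"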
@@ -7835,6 +7835,101 @@ _oc_convert()
    must_have_one_noun=()
}

+_oc_import_docker-compose()
+{
+    last_command="oc_import_docker-compose"
+    commands=()
+
+    flags=()
+    two_word_flags=()
+    flags_with_completion=()
+    flags_completion=()
+
+    flags+=("--as-template=")
+    flags+=("--dry-run")
+    flags+=("--filename=")
+    flags_with_completion+=("--filename")
+    flags_completion+=("__handle_filename_extension_flag json|yaml|yml")
+    two_word_flags+=("-f")
+    flags_with_completion+=("-f")
+    flags_completion+=("__handle_filename_extension_flag json|yaml|yml")
+    flags+=("--generator=")
+    flags+=("--output=")
+    two_word_flags+=("-o")
+    flags+=("--output-version=")
+    flags+=("--api-version=")
+    flags+=("--as=")
+    flags+=("--certificate-authority=")
+    flags_with_completion+=("--certificate-authority")
+    flags_completion+=("_filedir")
+    flags+=("--client-certificate=")
+    flags_with_completion+=("--client-certificate")
+    flags_completion+=("_filedir")
+    flags+=("--client-key=")
+    flags_with_completion+=("--client-key")
+    flags_completion+=("_filedir")
+    flags+=("--cluster=")
+    flags+=("--config=")
+    flags_with_completion+=("--config")
+    flags_completion+=("_filedir")
+    flags+=("--context=")
+    flags+=("--google-json-key=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--log-flush-frequency=")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    two_word_flags+=("-n")
+    flags+=("--server=")
+    flags+=("--token=")
+    flags+=("--user=")
+
+    must_have_one_flag=()
+    must_have_one_flag+=("--filename=")
+    must_have_one_flag+=("-f")
+    must_have_one_noun=()
+}
+
+_oc_import()
+{
+    last_command="oc_import"
+    commands=()
+    commands+=("docker-compose")
+
+    flags=()
+    two_word_flags=()
+    flags_with_completion=()
+    flags_completion=()
+
+    flags+=("--api-version=")
+    flags+=("--as=")
+    flags+=("--certificate-authority=")
+    flags_with_completion+=("--certificate-authority")
+    flags_completion+=("_filedir")
+    flags+=("--client-certificate=")
+    flags_with_completion+=("--client-certificate")
+    flags_completion+=("_filedir")
+    flags+=("--client-key=")
+    flags_with_completion+=("--client-key")
+    flags_completion+=("_filedir")
+    flags+=("--cluster=")
+    flags+=("--config=")
+    flags_with_completion+=("--config")
+    flags_completion+=("_filedir")
+    flags+=("--context=")
+    flags+=("--google-json-key=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--log-flush-frequency=")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    two_word_flags+=("-n")
+    flags+=("--server=")
+    flags+=("--token=")
+    flags+=("--user=")
+
+    must_have_one_flag=()
+    must_have_one_noun=()
+}
+
_oc_logout()
{
    last_command="oc_logout"

@@ -8591,6 +8686,7 @@ _oc()
    commands+=("export")
    commands+=("policy")
    commands+=("convert")
+    commands+=("import")
    commands+=("logout")
    commands+=("config")
    commands+=("whoami")

@@ -11420,6 +11420,101 @@ _openshift_cli_convert()
    must_have_one_noun=()
}

+_openshift_cli_import_docker-compose()
+{
+    last_command="openshift_cli_import_docker-compose"
+    commands=()
+
+    flags=()
+    two_word_flags=()
+    flags_with_completion=()
+    flags_completion=()
+
+    flags+=("--as-template=")
+    flags+=("--dry-run")
+    flags+=("--filename=")
+    flags_with_completion+=("--filename")
+    flags_completion+=("__handle_filename_extension_flag json|yaml|yml")
+    two_word_flags+=("-f")
+    flags_with_completion+=("-f")
+    flags_completion+=("__handle_filename_extension_flag json|yaml|yml")
+    flags+=("--generator=")
+    flags+=("--output=")
+    two_word_flags+=("-o")
+    flags+=("--output-version=")
+    flags+=("--api-version=")
+    flags+=("--as=")
+    flags+=("--certificate-authority=")
+    flags_with_completion+=("--certificate-authority")
+    flags_completion+=("_filedir")
+    flags+=("--client-certificate=")
+    flags_with_completion+=("--client-certificate")
+    flags_completion+=("_filedir")
+    flags+=("--client-key=")
+    flags_with_completion+=("--client-key")
+    flags_completion+=("_filedir")
+    flags+=("--cluster=")
+    flags+=("--config=")
+    flags_with_completion+=("--config")
+    flags_completion+=("_filedir")
+    flags+=("--context=")
+    flags+=("--google-json-key=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--log-flush-frequency=")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    two_word_flags+=("-n")
+    flags+=("--server=")
+    flags+=("--token=")
+    flags+=("--user=")
+
+    must_have_one_flag=()
+    must_have_one_flag+=("--filename=")
+    must_have_one_flag+=("-f")
+    must_have_one_noun=()
+}
+
+_openshift_cli_import()
+{
+    last_command="openshift_cli_import"
+    commands=()
+    commands+=("docker-compose")
+
+    flags=()
+    two_word_flags=()
+    flags_with_completion=()
+    flags_completion=()
+
+    flags+=("--api-version=")
+    flags+=("--as=")
+    flags+=("--certificate-authority=")
+    flags_with_completion+=("--certificate-authority")
+    flags_completion+=("_filedir")
+    flags+=("--client-certificate=")
+    flags_with_completion+=("--client-certificate")
+    flags_completion+=("_filedir")
+    flags+=("--client-key=")
+    flags_with_completion+=("--client-key")
+    flags_completion+=("_filedir")
+    flags+=("--cluster=")
+    flags+=("--config=")
+    flags_with_completion+=("--config")
+    flags_completion+=("_filedir")
+    flags+=("--context=")
+    flags+=("--google-json-key=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--log-flush-frequency=")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    two_word_flags+=("-n")
+    flags+=("--server=")
+    flags+=("--token=")
+    flags+=("--user=")
+
+    must_have_one_flag=()
+    must_have_one_noun=()
+}
+
_openshift_cli_logout()
{
    last_command="openshift_cli_logout"

@@ -12176,6 +12271,7 @@ _openshift_cli()
    commands+=("export")
    commands+=("policy")
    commands+=("convert")
+    commands+=("import")
    commands+=("logout")
    commands+=("config")
    commands+=("whoami")

@@ -1240,6 +1240,23 @@ Display one or many resources
====


+== oc import docker-compose
+Import a docker-compose.yml project into OpenShift
+
+====
+
+[options="nowrap"]
+----
+  # Import a docker-compose.yml file into OpenShift
+  $ oc import docker-compose -f ./docker-compose.yml
+
+  # Turn a docker-compose.yml file into a template
+  $ oc import docker-compose -f ./docker-compose.yml -o yaml --as-template
+
+----
+====
+
+
== oc import-image
Imports images from a Docker registry

@@ -16,6 +16,7 @@ import (
	"github.com/openshift/origin/pkg/cmd/admin"
	"github.com/openshift/origin/pkg/cmd/cli/cmd"
	"github.com/openshift/origin/pkg/cmd/cli/cmd/dockerbuild"
+	"github.com/openshift/origin/pkg/cmd/cli/cmd/importer"
	"github.com/openshift/origin/pkg/cmd/cli/cmd/rsync"
	"github.com/openshift/origin/pkg/cmd/cli/cmd/set"
	"github.com/openshift/origin/pkg/cmd/cli/policy"

@@ -153,6 +154,7 @@ func NewCommandCLI(name, fullName string, in io.Reader, out, errout io.Writer) *
				cmd.NewCmdExport(fullName, f, in, out),
				policy.NewCmdPolicy(policy.PolicyRecommendedName, fullName+" "+policy.PolicyRecommendedName, f, out),
				cmd.NewCmdConvert(fullName, f, out),
+				importer.NewCmdImport(fullName, f, in, out, errout),
			},
		},
		{

new file mode 100644
@@ -0,0 +1,182 @@
+package importer
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/spf13/cobra"
+
+	kapi "k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/apimachinery/registered"
+	"k8s.io/kubernetes/pkg/kubectl"
+	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+	"k8s.io/kubernetes/pkg/runtime"
+
+	"github.com/openshift/origin/pkg/client"
+	cmdutil "github.com/openshift/origin/pkg/cmd/util"
+	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
+	configcmd "github.com/openshift/origin/pkg/config/cmd"
+	"github.com/openshift/origin/pkg/generate/app"
+	appcmd "github.com/openshift/origin/pkg/generate/app/cmd"
+	"github.com/openshift/origin/pkg/generate/dockercompose"
+)
+
+const (
+	dockerComposeLong = `
+Import a Docker Compose file as OpenShift objects
+
+Docker Compose files offer a container centric build and deploy pattern for simple applications.
+This command will transform a provided docker-compose.yml application into its OpenShift equivalent.
+During transformation fields in the compose syntax that are not relevant when running on top of
+a containerized platform will be ignored and a warning printed.
+
+The command will create objects unless you pass the -o yaml or --as-template flags to generate a
+configuration file for later use.`
+
+	dockerComposeExample = `  # Import a docker-compose.yml file into OpenShift
+  $ %[1]s docker-compose -f ./docker-compose.yml
+
+  # Turn a docker-compose.yml file into a template
+  $ %[1]s docker-compose -f ./docker-compose.yml -o yaml --as-template
+`
+
+	DockerComposeV1GeneratorName = "docker-compose/v1"
+)
+
+type DockerComposeOptions struct {
+	Action configcmd.BulkAction
+
+	In        io.Reader
+	Filenames []string
+
+	Generator  string
+	AsTemplate string
+
+	PrintObject    func(runtime.Object) error
+	OutputVersions []unversioned.GroupVersion
+
+	Namespace string
+	Client    client.TemplateConfigsNamespacer
+}
+
+// NewCmdDockerCompose imports a docker-compose file as a template.
+func NewCmdDockerCompose(fullName string, f *clientcmd.Factory, in io.Reader, out, errout io.Writer) *cobra.Command {
+	options := &DockerComposeOptions{
+		Action: configcmd.BulkAction{
+			Out:    out,
+			ErrOut: errout,
+		},
+		In:        in,
+		Generator: DockerComposeV1GeneratorName,
+	}
+	cmd := &cobra.Command{
+		Use:     "docker-compose -f COMPOSEFILE",
+		Short:   "Import a docker-compose.yml project into OpenShift",
+		Long:    dockerComposeLong,
+		Example: fmt.Sprintf(dockerComposeExample, fullName),
+		Run: func(cmd *cobra.Command, args []string) {
+			kcmdutil.CheckErr(options.Complete(f, cmd, args))
+			kcmdutil.CheckErr(options.Validate())
+			if err := options.Run(); err != nil {
+				// TODO: move me to kcmdutil
+				if err == cmdutil.ErrExit {
+					os.Exit(1)
+				}
+				kcmdutil.CheckErr(err)
+			}
+		},
+	}
+	usage := "Filename, directory, or URL to docker-compose.yml file to use"
+	kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage)
+	cmd.MarkFlagRequired("filename")
+
+	cmd.Flags().String("generator", options.Generator, "The name of the API generator to use.")
+	cmd.Flags().StringVar(&options.AsTemplate, "as-template", "", "If set, generate a template with the provided name")
+
+	options.Action.BindForOutput(cmd.Flags())
+	cmd.Flags().String("output-version", "", "The preferred API versions of the output objects")
+
+	return cmd
+}
+
+func (o *DockerComposeOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, args []string) error {
+	version, _ := cmd.Flags().GetString("output-version")
+	for _, v := range strings.Split(version, ",") {
+		gv, err := unversioned.ParseGroupVersion(v)
+		if err != nil {
+			return fmt.Errorf("provided output-version %q is not valid: %v", v, err)
+		}
+		o.OutputVersions = append(o.OutputVersions, gv)
+	}
+	o.OutputVersions = append(o.OutputVersions, registered.EnabledVersions()...)
+
+	o.Action.Bulk.Mapper = clientcmd.ResourceMapper(f)
+	o.Action.Bulk.Op = configcmd.Create
+	mapper, _ := f.Object(false)
+	o.PrintObject = cmdutil.VersionedPrintObject(f.PrintObject, cmd, mapper, o.Action.Out)
+
+	o.Generator, _ = cmd.Flags().GetString("generator")
+
+	ns, _, err := f.DefaultNamespace()
+	if err != nil {
+		return err
+	}
+	o.Namespace = ns
+
+	o.Client, _, err = f.Clients()
+	return err
+}
+
+func (o *DockerComposeOptions) Validate() error {
+	if len(o.Filenames) == 0 {
+		return fmt.Errorf("you must provide the paths to one or more docker-compose.yml files")
+	}
+	switch o.Generator {
+	case DockerComposeV1GeneratorName:
+	default:
+		return fmt.Errorf("the generator %q is not supported, use: %s", o.Generator, DockerComposeV1GeneratorName)
+	}
+	return nil
+}
+
+func (o *DockerComposeOptions) Run() error {
+	template, err := dockercompose.Generate(o.Filenames...)
+	if err != nil {
+		return err
+	}
+
+	// all the types generated into the template should be known
+	if errs := app.AsVersionedObjects(template.Objects, kapi.Scheme, kapi.Scheme, o.OutputVersions...); len(errs) > 0 {
+		for _, err := range errs {
+			fmt.Fprintf(o.Action.ErrOut, "error: %v\n", err)
+		}
+	}
+
+	if o.Action.ShouldPrint() || (o.Action.Output == "name" && len(o.AsTemplate) > 0) {
+		var out runtime.Object
+		if len(o.AsTemplate) > 0 {
+			template.Name = o.AsTemplate
+			out = template
+		} else {
+			out = &kapi.List{Items: template.Objects}
+		}
+		return o.PrintObject(out)
+	}
+
+	result, err := appcmd.TransformTemplate(template, o.Client, o.Namespace, nil)
+	if err != nil {
+		return err
+	}
+
+	if o.Action.Verbose() {
+		appcmd.DescribeGeneratedTemplate(o.Action.Out, "", result, o.Namespace)
+	}
+
+	if errs := o.Action.WithMessage("Importing compose file", "creating").Run(&kapi.List{Items: result.Objects}, o.Namespace); len(errs) > 0 {
+		return cmdutil.ErrExit
+	}
+	return nil
+}

new file mode 100644
@@ -0,0 +1,33 @@
+package importer
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/spf13/cobra"
+
+	cmdutil "github.com/openshift/origin/pkg/cmd/util"
+	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
+)
+
+const (
+	importLong = `
+Import outside applications into OpenShift
+
+These commands assist in bringing existing applications into OpenShift.`
+)
+
+// NewCmdImport exposes commands for importing applications.
+func NewCmdImport(fullName string, f *clientcmd.Factory, in io.Reader, out, errout io.Writer) *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "import COMMAND",
+		Short: "Commands that import applications",
+		Long:  importLong,
+		Run:   cmdutil.DefaultSubCommandRun(out),
+	}
+
+	name := fmt.Sprintf("%s import", fullName)
+
+	cmd.AddCommand(NewCmdDockerCompose(name, f, in, out, errout))
+	return cmd
+}

@@ -289,10 +289,10 @@ func AddComponentInputsToRefBuilder(b *app.ReferenceBuilder, r *Resolvers, c *Co
	})
	b.AddComponents(c.TemplateFiles, func(input *app.ComponentInput) app.ComponentReference {
		input.Argument = fmt.Sprintf("--file=%q", input.From)
-		input.Searcher = r.TemplateFileSearcher
		if r.TemplateFileSearcher != nil {
			input.Resolver = app.FirstMatchResolver{Searcher: r.TemplateFileSearcher}
		}
+		input.Searcher = r.TemplateFileSearcher
		return input
	})
	b.AddComponents(c.Components, func(input *app.ComponentInput) app.ComponentReference {

new file mode 100644
@@ -0,0 +1,562 @@
+package dockercompose
+
+import (
+	"fmt"
+	"net"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/golang/glog"
+
+	kapi "k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
+	utilerrs "k8s.io/kubernetes/pkg/util/errors"
+	"k8s.io/kubernetes/pkg/util/sets"
+
+	deployapi "github.com/openshift/origin/pkg/deploy/api"
+	"github.com/openshift/origin/pkg/generate/app"
+	"github.com/openshift/origin/pkg/generate/git"
+	templateapi "github.com/openshift/origin/pkg/template/api"
+	dockerfileutil "github.com/openshift/origin/pkg/util/docker/dockerfile"
+	"github.com/openshift/origin/third_party/github.com/docker/libcompose/project"
+)
+
+// IsPossibleDockerCompose returns true when the path's base name is docker-compose.yml or docker-compose.yaml.
+func IsPossibleDockerCompose(path string) bool {
+	switch base := filepath.Base(path); {
+	case base == "docker-compose.yaml", base == "docker-compose.yml":
+		return true
+	default:
+		return false
+	}
+}
+
+// Generate accepts a set of Docker compose project paths and converts them into an
+// OpenShift template definition.
+func Generate(paths ...string) (*templateapi.Template, error) {
+	for i := range paths {
+		path, err := filepath.Abs(paths[i])
+		if err != nil {
+			return nil, err
+		}
+		paths[i] = path
+	}
+	var bases []string
+	for _, s := range paths {
+		bases = append(bases, filepath.Dir(s))
+	}
+
+	context := &project.Context{
+		ComposeFiles: paths,
+	}
+	p := project.NewProject(context)
+	if err := p.Parse(); err != nil {
+		return nil, err
+	}
+	template := &templateapi.Template{}
+	template.Name = p.Name
+
+	serviceOrder := sets.NewString()
+	warnings := make(map[string][]string)
+	for k, v := range p.Configs {
+		serviceOrder.Insert(k)
+		warnUnusableComposeElements(k, v, warnings)
+	}
+
+	g := app.NewImageRefGenerator()
+
+	var errs []error
+	var pipelines app.PipelineGroup
+	builds := make(map[string]*app.Pipeline)
+
+	// identify colocated components due to shared volumes
+	joins := make(map[string]sets.String)
+	volumesFrom := make(map[string][]string)
+	for _, k := range serviceOrder.List() {
+		if joins[k] == nil {
+			joins[k] = sets.NewString(k)
+		}
+		v := p.Configs[k]
+		for _, from := range v.VolumesFrom {
+			switch parts := strings.Split(from, ":"); len(parts) {
+			case 1:
+				joins[k].Insert(parts[0])
+				volumesFrom[k] = append(volumesFrom[k], parts[0])
+			case 2:
+				target := parts[1]
+				if parts[1] == "ro" || parts[1] == "rw" {
+					target = parts[0]
+				}
+				joins[k].Insert(target)
+				volumesFrom[k] = append(volumesFrom[k], target)
+			case 3:
+				joins[k].Insert(parts[1])
+				volumesFrom[k] = append(volumesFrom[k], parts[1])
+			}
+		}
+	}
+	joinOrder := sets.NewString()
+	for k := range joins {
+		joinOrder.Insert(k)
+	}
+	var colocated []sets.String
+	for _, k := range joinOrder.List() {
+		set := joins[k]
+		matched := -1
+		for i, existing := range colocated {
+			if set.Intersection(existing).Len() == 0 {
+				continue
+			}
+			if matched != -1 {
+				return nil, fmt.Errorf("%q belongs with %v, but %v also contains some overlapping elements", k, set, colocated[matched])
+			}
+			existing.Insert(set.List()...)
+			matched = i
+			continue
+		}
+		if matched == -1 {
+			colocated = append(colocated, set)
+		}
+	}
+
+	// identify service aliases
+	aliases := make(map[string]sets.String)
+	for _, v := range p.Configs {
+		for _, s := range v.Links.Slice() {
+			parts := strings.SplitN(s, ":", 2)
+			if len(parts) != 2 || parts[0] == parts[1] {
+				continue
+			}
+			set := aliases[parts[0]]
+			if set == nil {
+				set = sets.NewString()
+				aliases[parts[0]] = set
+			}
+			set.Insert(parts[1])
+		}
+	}
+
+	// find and define build pipelines
+	for _, k := range serviceOrder.List() {
+		v := p.Configs[k]
+		if len(v.Build) == 0 {
+			continue
+		}
+		if _, ok := builds[v.Build]; ok {
+			continue
+		}
+		var base, relative string
+		for _, s := range bases {
+			if !strings.HasPrefix(v.Build, s) {
+				continue
+			}
+			base = s
+			path, err := filepath.Rel(base, v.Build)
+			if err != nil {
+				return nil, fmt.Errorf("path is not relative to base: %v", err)
+			}
+			relative = path
+			break
+		}
+		if len(base) == 0 {
+			return nil, fmt.Errorf("build path outside of the compose file: %s", v.Build)
+		}
+
+		// if this is a Git repository, make the path relative
+		if root, err := git.NewRepository().GetRootDir(base); err == nil {
+			if relative, err = filepath.Rel(root, filepath.Join(base, relative)); err != nil {
+				return nil, fmt.Errorf("unable to find relative path for Git repository: %v", err)
+			}
+			base = root
+		}
+		buildPath := filepath.Join(base, relative)
+
+		// TODO: what if there is no origin for this repo?
+
+		glog.V(4).Infof("compose service: %#v", v)
+		repo, err := app.NewSourceRepositoryWithDockerfile(buildPath, "")
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+		repo.BuildWithDocker()
+
+		info := repo.Info()
+		if info == nil || info.Dockerfile == nil {
+			errs = append(errs, fmt.Errorf("unable to locate a Dockerfile in %s", v.Build))
+			continue
+		}
+		node := info.Dockerfile.AST()
+		baseImage := dockerfileutil.LastBaseImage(node)
+		if len(baseImage) == 0 {
+			errs = append(errs, fmt.Errorf("the Dockerfile in the repository %q has no FROM instruction", info.Path))
+			continue
+		}
+
+		var ports []string
+		for _, s := range v.Ports {
+			container, _ := extractFirstPorts(s)
+			ports = append(ports, container)
+		}
+
+		image, err := g.FromNameAndPorts(baseImage, ports)
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+		image.AsImageStream = true
+		image.TagDirectly = true
+		image.ObjectName = k
+		image.Tag = "from"
+
+		pipeline, err := app.NewPipelineBuilder(k, nil, false).To(k).NewBuildPipeline(k, image, repo)
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+		if len(relative) > 0 {
+			pipeline.Build.Source.ContextDir = relative
+		}
+		// TODO: this should not be necessary
+		pipeline.Build.Source.Name = k
+		pipeline.Name = k
+		pipeline.Image.ObjectName = k
+		glog.V(4).Infof("created pipeline %+v", pipeline)
+
+		builds[v.Build] = pipeline
+		pipelines = append(pipelines, pipeline)
+	}
+
+	if len(errs) > 0 {
+		return nil, utilerrs.NewAggregate(errs)
+	}
+
+	// create deployment groups
+	for _, pod := range colocated {
+		var group app.PipelineGroup
+		commonMounts := make(map[string]string)
+		for _, k := range pod.List() {
+			v := p.Configs[k]
+			var inputImage *app.ImageRef
+			if len(v.Image) != 0 {
+				image, err := g.FromName(v.Image)
+				if err != nil {
+					errs = append(errs, err)
+					continue
+				}
+				image.AsImageStream = true
+				image.TagDirectly = true
+				image.ObjectName = k
+
+				inputImage = image
+			}
+			if inputImage == nil {
+				if previous, ok := builds[v.Build]; ok {
+					inputImage = previous.Image
+				}
+			}
+			if inputImage == nil {
+				errs = append(errs, fmt.Errorf("could not find an input image for %q", k))
+				continue
+			}
+
+			inputImage.ContainerFn = func(c *kapi.Container) {
+				if len(v.ContainerName) > 0 {
+					c.Name = v.ContainerName
+				}
+				for _, s := range v.Ports {
+					container, _ := extractFirstPorts(s)
+					if port, err := strconv.Atoi(container); err == nil {
+						c.Ports = append(c.Ports, kapi.ContainerPort{ContainerPort: port})
+					}
+				}
+				c.Args = v.Command.Slice()
+				if len(v.Entrypoint.Slice()) > 0 {
+					c.Command = v.Entrypoint.Slice()
+				}
+				if len(v.WorkingDir) > 0 {
+					c.WorkingDir = v.WorkingDir
+				}
+				c.Env = append(c.Env, app.ParseEnvironment(v.Environment.Slice()...).List()...)
+				if uid, err := strconv.Atoi(v.User); err == nil {
+					uid64 := int64(uid)
+					if c.SecurityContext == nil {
+						c.SecurityContext = &kapi.SecurityContext{}
+					}
+					c.SecurityContext.RunAsUser = &uid64
+				}
+				c.TTY = v.Tty
+				if v.StdinOpen {
+					c.StdinOnce = true
+					c.Stdin = true
+				}
+				if v.Privileged {
+					if c.SecurityContext == nil {
+						c.SecurityContext = &kapi.SecurityContext{}
+					}
+					t := true
+					c.SecurityContext.Privileged = &t
+				}
+				if v.ReadOnly {
+					if c.SecurityContext == nil {
+						c.SecurityContext = &kapi.SecurityContext{}
+					}
+					t := true
+					c.SecurityContext.ReadOnlyRootFilesystem = &t
+				}
+				if v.MemLimit > 0 {
+					q := resource.NewQuantity(v.MemLimit, resource.DecimalSI)
+					if c.Resources.Limits == nil {
+						c.Resources.Limits = make(kapi.ResourceList)
+					}
+					c.Resources.Limits[kapi.ResourceMemory] = *q
+				}
+
+				if quota := v.CPUQuota; quota > 0 {
+					if quota < 1000 {
+						quota = 1000 // minQuotaPeriod
+					}
+					milliCPU := quota * 1000     // milliCPUtoCPU
+					milliCPU = milliCPU / 100000 // quotaPeriod
+					q := resource.NewMilliQuantity(milliCPU, resource.DecimalSI)
+					if c.Resources.Limits == nil {
+						c.Resources.Limits = make(kapi.ResourceList)
+					}
+					c.Resources.Limits[kapi.ResourceCPU] = *q
+				}
+				if shares := v.CPUShares; shares > 0 {
+					if shares < 2 {
+						shares = 2 // minShares
+					}
+					milliCPU := shares * 1000  // milliCPUtoCPU
+					milliCPU = milliCPU / 1024 // sharesPerCPU
+					q := resource.NewMilliQuantity(milliCPU, resource.DecimalSI)
+					if c.Resources.Requests == nil {
+						c.Resources.Requests = make(kapi.ResourceList)
+					}
+					c.Resources.Requests[kapi.ResourceCPU] = *q
+				}
+
+				mountPoints := make(map[string][]string)
+				for _, s := range v.Volumes {
+					switch parts := strings.SplitN(s, ":", 3); len(parts) {
+					case 1:
+						mountPoints[""] = append(mountPoints[""], parts[0])
+					case 2:
+						fallthrough
+					default:
+						mountPoints[parts[0]] = append(mountPoints[parts[0]], parts[1])
+					}
+				}
+				for from, at := range mountPoints {
+					name, ok := commonMounts[from]
+					if !ok {
+						name = fmt.Sprintf("dir-%d", len(commonMounts)+1)
+						commonMounts[from] = name
+					}
+					for _, path := range at {
+						c.VolumeMounts = append(c.VolumeMounts, kapi.VolumeMount{Name: name, MountPath: path})
+					}
+				}
+			}
+
+			pipeline, err := app.NewPipelineBuilder(k, nil, true).To(k).NewImagePipeline(k, inputImage)
+			if err != nil {
+				errs = append(errs, err)
+				break
+			}
+
+			if err := pipeline.NeedsDeployment(nil, nil, false); err != nil {
+				return nil, err
+			}
+
+			group = append(group, pipeline)
+		}
+		if err := group.Reduce(); err != nil {
+			return nil, err
+		}
+		pipelines = append(pipelines, group...)
+	}
+
+	if len(errs) > 0 {
+		return nil, utilerrs.NewAggregate(errs)
+	}
+
+	acceptors := app.Acceptors{app.NewAcceptUnique(kapi.Scheme), app.AcceptNew}
+	objects := app.Objects{}
+	accept := app.NewAcceptFirst()
+	for _, p := range pipelines {
+		accepted, err := p.Objects(accept, acceptors)
+		if err != nil {
+			return nil, fmt.Errorf("can't setup %q: %v", p.From, err)
+		}
+		objects = append(objects, accepted...)
+	}
+
+	// create services for each object with a name based on alias.
+	containers := make(map[string]*kapi.Container)
+	var services []*kapi.Service
+	for _, obj := range objects {
+		switch t := obj.(type) {
+		case *deployapi.DeploymentConfig:
+			ports := app.UniqueContainerToServicePorts(app.AllContainerPorts(t.Spec.Template.Spec.Containers...))
+			if len(ports) == 0 {
+				continue
+			}
+			svc := app.GenerateService(t.ObjectMeta, t.Spec.Selector)
+			if aliases[svc.Name].Len() == 1 {
+				svc.Name = aliases[svc.Name].List()[0]
+			}
+			svc.Spec.Ports = ports
+			services = append(services, svc)
+
+			// take a reference to each container
+			for i := range t.Spec.Template.Spec.Containers {
+				c := &t.Spec.Template.Spec.Containers[i]
+				containers[c.Name] = c
+			}
+		}
+	}
+	for _, svc := range services {
+		objects = append(objects, svc)
+	}
+
+	// for each container that defines VolumesFrom, copy equivalent mounts.
+	// TODO: ensure mount names are unique?
+	for target, otherContainers := range volumesFrom {
+		for _, from := range otherContainers {
+			for _, volume := range containers[from].VolumeMounts {
+				containers[target].VolumeMounts = append(containers[target].VolumeMounts, volume)
+			}
+		}
+	}
+
+	template.Objects = objects
+
+	// generate warnings
+	if len(warnings) > 0 {
+		allWarnings := sets.NewString()
+		for msg, services := range warnings {
+			allWarnings.Insert(fmt.Sprintf("%s: %s", strings.Join(services, ","), msg))
+		}
+		if template.Annotations == nil {
+			template.Annotations = make(map[string]string)
+		}
+		template.Annotations[app.GenerationWarningAnnotation] = fmt.Sprintf("not all docker-compose fields were honored:\n* %s", strings.Join(allWarnings.List(), "\n* "))
+	}
+
+	return template, nil
+}
+
+// extractFirstPorts converts a Docker compose port spec (CONTAINER, HOST:CONTAINER, or
+// IP:HOST:CONTAINER) to the first container and host port in the range. Host port will
+// default to container port.
+func extractFirstPorts(port string) (container, host string) {
+	segments := strings.Split(port, ":")
+	container = segments[len(segments)-1]
+	container = rangeToPort(container)
+	switch {
+	case len(segments) == 3:
+		host = rangeToPort(segments[1])
+	case len(segments) == 2 && net.ParseIP(segments[0]) == nil:
+		host = rangeToPort(segments[0])
+	default:
+		host = container
+	}
+	return container, host
+}
+
+func rangeToPort(s string) string {
+	parts := strings.SplitN(s, "-", 2)
+	return parts[0]
+}
+
+// warnUnusableComposeElements adds warnings for unsupported elements in the provided service config.
+func warnUnusableComposeElements(k string, v *project.ServiceConfig, warnings map[string][]string) {
+	fn := func(msg string) {
+		warnings[msg] = append(warnings[msg], k)
+	}
+	if len(v.CapAdd) > 0 || len(v.CapDrop) > 0 {
+		// TODO: we can support this
+		fn("cap_add and cap_drop are not supported")
+	}
+	if len(v.CgroupParent) > 0 {
+		fn("cgroup_parent is not supported")
+	}
+	if len(v.CPUSet) > 0 {
+		fn("cpuset is not supported")
+	}
+	if len(v.Devices) > 0 {
+		fn("devices are not supported")
+	}
+	if v.DNS.Len() > 0 || v.DNSSearch.Len() > 0 {
+		fn("dns and dns_search are not supported")
+	}
+	if len(v.DomainName) > 0 {
+		fn("domainname is not supported")
+	}
+	if len(v.Hostname) > 0 {
+		fn("hostname is not supported")
+	}
+	if len(v.Labels.MapParts()) > 0 {
+		fn("labels is ignored")
+	}
+	if len(v.Links.Slice()) > 0 {
+		//fn("links are not supported, use services to talk to other pods")
+		// TODO: display some sort of warning when linking will be inconsistent
+	}
+	if len(v.LogDriver) > 0 {
+		fn("log_driver is not supported")
+	}
+	if len(v.MacAddress) > 0 {
+		fn("mac_address is not supported")
+	}
+	if len(v.Net) > 0 {
+		fn("net is not supported")
+	}
+	if len(v.Pid) > 0 {
+		fn("pid is not supported")
+	}
+	if len(v.Uts) > 0 {
+		fn("uts is not supported")
+	}
+	if len(v.Ipc) > 0 {
+		fn("ipc is not supported")
+	}
+	if v.MemSwapLimit > 0 {
+		fn("mem_swap_limit is not supported")
+	}
+	if len(v.Restart) > 0 {
+		fn("restart is ignored - all pods are automatically restarted")
+	}
+	if len(v.SecurityOpt) > 0 {
+		fn("security_opt is not supported")
+	}
+	if len(v.User) > 0 {
+		if _, err := strconv.Atoi(v.User); err != nil {
+			fn("setting user to a string is not supported - use numeric user value")
+		}
+	}
+	if len(v.VolumeDriver) > 0 {
+		fn("volume_driver is not supported")
+	}
+	if len(v.VolumesFrom) > 0 {
+		fn("volumes_from is not supported")
+		// TODO: use volumes from for colocated containers to automount volumes
+	}
+	if len(v.ExternalLinks) > 0 {
+		fn("external_links are not supported - use services")
+	}
+	if len(v.LogOpt) > 0 {
+		fn("log_opt is not supported")
+	}
+	if len(v.ExtraHosts) > 0 {
+		fn("extra_hosts is not supported")
+	}
+	if len(v.Ulimits.Elements) > 0 {
+		fn("ulimits is not supported")
+	}
+	// TODO: fields to handle
+	// EnvFile Stringorslice `yaml:"env_file,omitempty"`
+}
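Note on the resource conversion in the generator above: cpu_shares become a CPU request of shares*1000/1024 milliCPU (floored at 2 shares), cpu_quota becomes a CPU limit of quota*1000/100000 milliCPU (floored at a 1000 quota), and mem_limit becomes a memory limit. For the fixture at the bottom of this diff, cpu_shares: 512 yields a 500m CPU request, cpu_quota: 10000 yields a 100m CPU limit, and mem_limit: 1000000000 yields a 1G memory limit, which is exactly what the expected DeploymentConfigs contain.
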
new file mode 100644

@@ -0,0 +1,57 @@
+package dockercompose
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/golang/glog"
+
+	kapi "k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/runtime"
+
+	"github.com/openshift/origin/pkg/generate/app"
+)
+
+// FileSearcher resolves docker-compose template files into template objects.
+type FileSearcher struct {
+}
+
+// Search attempts to read template files and transform them into template objects.
+func (r *FileSearcher) Search(precise bool, terms ...string) (app.ComponentMatches, []error) {
+	matches := app.ComponentMatches{}
+	var errs []error
+	for _, term := range terms {
+		if term == "__dockercomposefile_fail" {
+			errs = append(errs, fmt.Errorf("unable to find the specified template file: %s", term))
+			continue
+		}
+		if !IsPossibleDockerCompose(term) {
+			continue
+		}
+		if _, err := os.Stat(term); err != nil {
+			continue
+		}
+		template, err := Generate(term)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("unable to convert docker-compose.yaml: %v", err))
+			continue
+		}
+
+		// TODO: template processing should handle objects that are not versioned
+		for i := range template.Objects {
+			template.Objects[i] = runtime.NewEncodable(kapi.Codecs.LegacyCodec(), template.Objects[i])
+		}
+
+		glog.V(4).Infof("found docker-compose: %#v", template)
+		matches = append(matches, &app.ComponentMatch{
+			Value:       term,
+			Argument:    fmt.Sprintf("--file=%q", template.Name),
+			Name:        template.Name,
+			Description: fmt.Sprintf("Docker compose file %s", term),
+			Score:       0,
+			Template:    template,
+		})
+	}
+
+	return matches, errs
+}

@@ -199,6 +199,12 @@ os::cmd::expect_success 'oc delete all -l app=ruby'
jsonfile="${OS_ROOT}/test/fixtures/invalid.json"
os::cmd::expect_failure_and_text "oc new-app '${jsonfile}'" "error: unable to load template file \"${jsonfile}\": at offset 8: invalid character '}' after object key"

+# a docker compose file should be transformed into an application by the import command
+os::cmd::expect_success_and_text 'oc import docker-compose -f test/fixtures/app-scenarios/docker-compose/complex/docker-compose.yml --dry-run' 'warning: not all docker-compose fields were honored'
+os::cmd::expect_success_and_text 'oc import docker-compose -f test/fixtures/app-scenarios/docker-compose/complex/docker-compose.yml --dry-run' 'db: cpuset is not supported'
+os::cmd::expect_success_and_text 'oc import docker-compose -f test/fixtures/app-scenarios/docker-compose/complex/docker-compose.yml -o name --dry-run' 'service/redis'
+os::cmd::expect_success_and_text 'oc import docker-compose -f test/fixtures/app-scenarios/docker-compose/complex/docker-compose.yml -o name --as-template=other --dry-run' 'template/other'
+os::cmd::expect_failure 'diff --suppress-common-lines -y <(oc import docker-compose -f test/fixtures/app-scenarios/docker-compose/complex/docker-compose.yml -o yaml) test/fixtures/app-scenarios/docker-compose/complex/docker-compose.imported.yaml | grep -v secret'

# check new-build
os::cmd::expect_failure_and_text 'oc new-build mysql -o yaml' 'you must specify at least one source repository URL'

new file mode 100644

@@ -0,0 +1,307 @@ |
| 0 |
+apiVersion: v1 |
|
| 1 |
+items: |
|
| 2 |
+- apiVersion: v1 |
|
| 3 |
+ kind: ImageStream |
|
| 4 |
+ metadata: |
|
| 5 |
+ annotations: |
|
| 6 |
+ openshift.io/generated-by: OpenShiftNewApp |
|
| 7 |
+ creationTimestamp: null |
|
| 8 |
+ labels: |
|
| 9 |
+ app: nginx |
|
| 10 |
+ name: nginx |
|
| 11 |
+ spec: |
|
| 12 |
+ tags: |
|
| 13 |
+ - annotations: |
|
| 14 |
+ openshift.io/imported-from: tutum/nginx |
|
| 15 |
+ from: |
|
| 16 |
+ kind: DockerImage |
|
| 17 |
+ name: tutum/nginx |
|
| 18 |
+ generation: null |
|
| 19 |
+ importPolicy: {}
|
|
| 20 |
+ name: from |
|
| 21 |
+ status: |
|
| 22 |
+ dockerImageRepository: "" |
|
| 23 |
+- apiVersion: v1 |
|
| 24 |
+ kind: BuildConfig |
|
| 25 |
+ metadata: |
|
| 26 |
+ annotations: |
|
| 27 |
+ openshift.io/generated-by: OpenShiftNewApp |
|
| 28 |
+ creationTimestamp: null |
|
| 29 |
+ labels: |
|
| 30 |
+ app: nginx |
|
| 31 |
+ name: nginx |
|
| 32 |
+ spec: |
|
| 33 |
+ output: |
|
| 34 |
+ to: |
|
| 35 |
+ kind: ImageStreamTag |
|
| 36 |
+ name: nginx:latest |
|
| 37 |
+ postCommit: {}
|
|
| 38 |
+ resources: {}
|
|
| 39 |
+ source: |
|
| 40 |
+ contextDir: test/fixtures/app-scenarios/docker-compose/complex/nginx |
|
| 41 |
+ git: |
|
| 42 |
+ ref: libcompose |
|
| 43 |
+ uri: git@github.com:openshift/origin.git |
|
| 44 |
+ secrets: null |
|
| 45 |
+ type: Git |
|
| 46 |
+ strategy: |
|
| 47 |
+ dockerStrategy: |
|
| 48 |
+ from: |
|
| 49 |
+ kind: ImageStreamTag |
|
| 50 |
+ name: nginx:from |
|
| 51 |
+ type: Docker |
|
| 52 |
+ triggers: |
|
| 53 |
+ - github: |
|
| 54 |
+ secret: IE96Fw4CdQs-g3giIjWZ |
|
| 55 |
+ type: GitHub |
|
| 56 |
+ - generic: |
|
| 57 |
+ secret: B8mx-lYjIBjNY6ymP6MT |
|
| 58 |
+ type: Generic |
|
| 59 |
+ - type: ConfigChange |
|
| 60 |
+ - imageChange: {}
|
|
| 61 |
+ type: ImageChange |
|
| 62 |
+ status: |
|
| 63 |
+ lastVersion: 0 |
|
| 64 |
+- apiVersion: v1 |
|
| 65 |
+ kind: ImageStream |
|
| 66 |
+ metadata: |
|
| 67 |
+ annotations: |
|
| 68 |
+ openshift.io/generated-by: OpenShiftNewApp |
|
| 69 |
+ creationTimestamp: null |
|
| 70 |
+ labels: |
|
| 71 |
+ app: nginx |
|
| 72 |
+ name: web |
|
| 73 |
+ spec: |
|
| 74 |
+ tags: |
|
| 75 |
+ - annotations: |
|
| 76 |
+ openshift.io/imported-from: node:0.10.38 |
|
| 77 |
+ from: |
|
| 78 |
+ kind: DockerImage |
|
| 79 |
+ name: node:0.10.38 |
|
| 80 |
+ generation: null |
|
| 81 |
+ importPolicy: {}
|
|
| 82 |
+ name: from |
|
| 83 |
+ status: |
|
| 84 |
+ dockerImageRepository: "" |
|
| 85 |
+- apiVersion: v1 |
|
| 86 |
+ kind: BuildConfig |
|
| 87 |
+ metadata: |
|
| 88 |
+ annotations: |
|
| 89 |
+ openshift.io/generated-by: OpenShiftNewApp |
|
| 90 |
+ creationTimestamp: null |
|
| 91 |
+ labels: |
|
| 92 |
+ app: nginx |
|
| 93 |
+ name: web |
|
| 94 |
+ spec: |
|
| 95 |
+ output: |
|
| 96 |
+ to: |
|
| 97 |
+ kind: ImageStreamTag |
|
| 98 |
+ name: web:latest |
|
| 99 |
+ postCommit: {}
|
|
| 100 |
+ resources: {}
|
|
| 101 |
+ source: |
|
| 102 |
+ contextDir: test/fixtures/app-scenarios/docker-compose/complex/app |
|
| 103 |
+ git: |
|
| 104 |
+ ref: libcompose |
|
| 105 |
+ uri: git@github.com:openshift/origin.git |
|
| 106 |
+ secrets: null |
|
| 107 |
+ type: Git |
|
| 108 |
+ strategy: |
|
| 109 |
+ dockerStrategy: |
|
| 110 |
+ from: |
|
| 111 |
+ kind: ImageStreamTag |
|
| 112 |
+ name: web:from |
|
| 113 |
+ type: Docker |
|
| 114 |
+ triggers: |
|
| 115 |
+ - github: |
|
| 116 |
+ secret: xMiKAo9EEZMKMxCRZSUk |
|
| 117 |
+ type: GitHub |
|
| 118 |
+ - generic: |
|
| 119 |
+ secret: doIIibCQIxRvv28uyP9J |
|
| 120 |
+ type: Generic |
|
| 121 |
+ - type: ConfigChange |
|
| 122 |
+ - imageChange: {}
|
|
| 123 |
+ type: ImageChange |
|
| 124 |
+ status: |
|
| 125 |
+ lastVersion: 0 |
|
| 126 |
+- apiVersion: v1 |
|
| 127 |
+ kind: ImageStream |
|
| 128 |
+ metadata: |
|
| 129 |
+ annotations: |
|
| 130 |
+ openshift.io/generated-by: OpenShiftNewApp |
|
| 131 |
+ creationTimestamp: null |
|
| 132 |
+ labels: |
|
| 133 |
+ app: nginx |
|
| 134 |
+ name: db |
|
| 135 |
+ spec: |
|
| 136 |
+ tags: |
|
| 137 |
+ - annotations: |
|
| 138 |
+ openshift.io/imported-from: redis |
|
| 139 |
+ from: |
|
| 140 |
+ kind: DockerImage |
|
| 141 |
+ name: redis |
|
| 142 |
+ generation: null |
|
| 143 |
+ importPolicy: {}
|
|
| 144 |
+ name: latest |
|
| 145 |
+ status: |
|
| 146 |
+ dockerImageRepository: "" |
|
| 147 |
+- apiVersion: v1 |
|
| 148 |
+ kind: DeploymentConfig |
|
| 149 |
+ metadata: |
|
| 150 |
+ annotations: |
|
| 151 |
+ openshift.io/generated-by: OpenShiftNewApp |
|
| 152 |
+ creationTimestamp: null |
|
| 153 |
+ labels: |
|
| 154 |
+ app: nginx |
|
| 155 |
+ name: db |
|
| 156 |
+ spec: |
|
| 157 |
+ replicas: 1 |
|
| 158 |
+ selector: |
|
| 159 |
+ deploymentconfig: db |
|
| 160 |
+ strategy: |
|
| 161 |
+ resources: {}
|
|
| 162 |
+ template: |
|
| 163 |
+ metadata: |
|
| 164 |
+ annotations: |
|
| 165 |
+ openshift.io/generated-by: OpenShiftNewApp |
|
| 166 |
+ creationTimestamp: null |
|
| 167 |
+ labels: |
|
| 168 |
+ app: nginx |
|
| 169 |
+ deploymentconfig: db |
|
| 170 |
+ spec: |
|
| 171 |
+ containers: |
|
| 172 |
+ - image: redis |
|
| 173 |
+ name: db |
|
| 174 |
+ ports: |
|
| 175 |
+ - containerPort: 6379 |
|
| 176 |
+ resources: |
|
| 177 |
+ limits: |
|
| 178 |
+ cpu: 100m |
|
| 179 |
+ memory: 1G |
|
| 180 |
+ test: false |
|
| 181 |
+ triggers: |
|
| 182 |
+ - type: ConfigChange |
|
| 183 |
+ - imageChangeParams: |
|
| 184 |
+ automatic: true |
|
| 185 |
+ containerNames: |
|
| 186 |
+ - db |
|
| 187 |
+ from: |
|
| 188 |
+ kind: ImageStreamTag |
|
| 189 |
+ name: db:latest |
|
| 190 |
+ type: ImageChange |
|
| 191 |
+ status: {}
|
|
| 192 |
+- apiVersion: v1 |
|
| 193 |
+ kind: DeploymentConfig |
|
| 194 |
+ metadata: |
|
| 195 |
+ annotations: |
|
| 196 |
+ openshift.io/generated-by: OpenShiftNewApp |
|
| 197 |
+ creationTimestamp: null |
|
| 198 |
+ labels: |
|
| 199 |
+ app: nginx |
|
| 200 |
+ name: nginx |
|
| 201 |
+ spec: |
|
| 202 |
+ replicas: 1 |
|
| 203 |
+ selector: |
|
| 204 |
+ deploymentconfig: nginx |
|
| 205 |
+ strategy: |
|
| 206 |
+ resources: {}
|
|
| 207 |
+ template: |
|
| 208 |
+ metadata: |
|
| 209 |
+ annotations: |
|
| 210 |
+ openshift.io/generated-by: OpenShiftNewApp |
|
| 211 |
+ creationTimestamp: null |
|
| 212 |
+ labels: |
|
| 213 |
+ app: nginx |
|
| 214 |
+ deploymentconfig: nginx |
|
| 215 |
+ spec: |
|
| 216 |
+ containers: |
|
| 217 |
+ - image: nginx |
|
| 218 |
+ name: nginx |
|
| 219 |
+ ports: |
|
| 220 |
+ - containerPort: 80 |
|
| 221 |
+ resources: {}
|
|
| 222 |
+ volumeMounts: |
|
| 223 |
+ - mountPath: /www/public |
|
| 224 |
+ name: dir-1 |
|
| 225 |
+ - mountPath: /src/app |
|
| 226 |
+ name: dir-2 |
|
| 227 |
+ - args: |
|
| 228 |
+ - nodemon |
|
| 229 |
+ - -L |
|
| 230 |
+ - app/bin/www |
|
| 231 |
+ image: web |
|
| 232 |
+ name: web |
|
| 233 |
+ ports: |
|
| 234 |
+ - containerPort: 3000 |
|
| 235 |
+ resources: |
|
| 236 |
+ requests: |
|
| 237 |
+ cpu: 500m |
|
| 238 |
+ volumeMounts: |
|
| 239 |
+ - mountPath: /src/app |
|
| 240 |
+ name: dir-2 |
|
| 241 |
+ volumes: |
|
| 242 |
+ - emptyDir: {}
|
|
| 243 |
+ name: dir-1 |
|
| 244 |
+ - emptyDir: {}
|
|
| 245 |
+ name: dir-2 |
|
| 246 |
+ test: false |
|
| 247 |
+ triggers: |
|
| 248 |
+ - type: ConfigChange |
|
| 249 |
+ - imageChangeParams: |
|
| 250 |
+ automatic: true |
|
| 251 |
+ containerNames: |
|
| 252 |
+ - nginx |
|
| 253 |
+ from: |
|
| 254 |
+ kind: ImageStreamTag |
|
| 255 |
+ name: nginx:latest |
|
| 256 |
+ type: ImageChange |
|
| 257 |
+ - imageChangeParams: |
|
| 258 |
+ automatic: true |
|
| 259 |
+ containerNames: |
|
| 260 |
+ - web |
|
| 261 |
+ from: |
|
| 262 |
+ kind: ImageStreamTag |
|
| 263 |
+ name: web:latest |
|
| 264 |
+ type: ImageChange |
|
| 265 |
+ status: {}
|
|
| 266 |
+- apiVersion: v1 |
|
| 267 |
+ kind: Service |
|
| 268 |
+ metadata: |
|
| 269 |
+ annotations: |
|
| 270 |
+ openshift.io/generated-by: OpenShiftNewApp |
|
| 271 |
+ creationTimestamp: null |
|
| 272 |
+ labels: |
|
| 273 |
+ app: nginx |
|
| 274 |
+ name: redis |
|
| 275 |
+ spec: |
|
| 276 |
+ ports: |
|
| 277 |
+ - name: 6379-tcp |
|
| 278 |
+ port: 6379 |
|
| 279 |
+ targetPort: 6379 |
|
| 280 |
+ selector: |
|
| 281 |
+ deploymentconfig: db |
|
| 282 |
+ status: |
|
| 283 |
+ loadBalancer: {}
|
|
| 284 |
+- apiVersion: v1 |
|
| 285 |
+ kind: Service |
|
| 286 |
+ metadata: |
|
| 287 |
+ annotations: |
|
| 288 |
+ openshift.io/generated-by: OpenShiftNewApp |
|
| 289 |
+ creationTimestamp: null |
|
| 290 |
+ labels: |
|
| 291 |
+ app: nginx |
|
| 292 |
+ name: nginx |
|
| 293 |
+ spec: |
|
| 294 |
+ ports: |
|
| 295 |
+ - name: 80-tcp |
|
| 296 |
+ port: 80 |
|
| 297 |
+ targetPort: 80 |
|
| 298 |
+ - name: 3000-tcp |
|
| 299 |
+ port: 3000 |
|
| 300 |
+ targetPort: 3000 |
|
| 301 |
+ selector: |
|
| 302 |
+ deploymentconfig: nginx |
|
| 303 |
+ status: |
|
| 304 |
+ loadBalancer: {}
|
|
| 305 |
+kind: List |
|
| 306 |
+metadata: {}
|
| 0 | 307 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,267 @@ |
| 0 |
+apiVersion: v1 |
|
| 1 |
+kind: Template |
|
| 2 |
+metadata: |
|
| 3 |
+ creationTimestamp: null |
|
| 4 |
+ name: complex |
|
| 5 |
+objects: |
|
| 6 |
+- apiVersion: v1 |
|
| 7 |
+ kind: ImageStream |
|
| 8 |
+ metadata: |
|
| 9 |
+ creationTimestamp: null |
|
| 10 |
+ name: nginx |
|
| 11 |
+ spec: |
|
| 12 |
+ tags: |
|
| 13 |
+ - annotations: |
|
| 14 |
+ openshift.io/imported-from: tutum/nginx |
|
| 15 |
+ from: |
|
| 16 |
+ kind: DockerImage |
|
| 17 |
+ name: tutum/nginx |
|
| 18 |
+ generation: null |
|
| 19 |
+ importPolicy: {}
|
|
| 20 |
+ name: from |
|
| 21 |
+ status: |
|
| 22 |
+ dockerImageRepository: "" |
|
| 23 |
+- apiVersion: v1 |
|
| 24 |
+ kind: BuildConfig |
|
| 25 |
+ metadata: |
|
| 26 |
+ creationTimestamp: null |
|
| 27 |
+ name: nginx |
|
| 28 |
+ spec: |
|
| 29 |
+ output: |
|
| 30 |
+ to: |
|
| 31 |
+ kind: ImageStreamTag |
|
| 32 |
+ name: nginx:latest |
|
| 33 |
+ postCommit: {}
|
|
| 34 |
+ resources: {}
|
|
| 35 |
+ source: |
|
| 36 |
+ contextDir: test/fixtures/app-scenarios/docker-compose/complex/nginx |
|
| 37 |
+ git: |
|
| 38 |
+ ref: libcompose |
|
| 39 |
+ uri: git@github.com:openshift/origin.git |
|
| 40 |
+ secrets: null |
|
| 41 |
+ type: Git |
|
| 42 |
+ strategy: |
|
| 43 |
+ dockerStrategy: |
|
| 44 |
+ from: |
|
| 45 |
+ kind: ImageStreamTag |
|
| 46 |
+ name: nginx:from |
|
| 47 |
+ type: Docker |
|
| 48 |
+ triggers: |
|
| 49 |
+ - github: |
|
| 50 |
+ secret: gKR65ZJCwVsproaayUIz |
|
| 51 |
+ type: GitHub |
|
| 52 |
+ - generic: |
|
| 53 |
+ secret: PnTkFixxvwHK--JxtaJ7 |
|
| 54 |
+ type: Generic |
|
| 55 |
+ - type: ConfigChange |
|
| 56 |
+ - imageChange: {}
|
|
| 57 |
+ type: ImageChange |
|
| 58 |
+ status: |
|
| 59 |
+ lastVersion: 0 |
|
| 60 |
+- apiVersion: v1 |
|
| 61 |
+ kind: ImageStream |
|
| 62 |
+ metadata: |
|
| 63 |
+ creationTimestamp: null |
|
| 64 |
+ name: web |
|
| 65 |
+ spec: |
|
| 66 |
+ tags: |
|
| 67 |
+ - annotations: |
|
| 68 |
+ openshift.io/imported-from: node:0.10.38 |
|
| 69 |
+ from: |
|
| 70 |
+ kind: DockerImage |
|
| 71 |
+ name: node:0.10.38 |
|
| 72 |
+ generation: null |
|
| 73 |
+ importPolicy: {}
|
|
| 74 |
+ name: from |
|
| 75 |
+ status: |
|
| 76 |
+ dockerImageRepository: "" |
|
| 77 |
+- apiVersion: v1 |
|
| 78 |
+ kind: BuildConfig |
|
| 79 |
+ metadata: |
|
| 80 |
+ creationTimestamp: null |
|
| 81 |
+ name: web |
|
| 82 |
+ spec: |
|
| 83 |
+ output: |
|
| 84 |
+ to: |
|
| 85 |
+ kind: ImageStreamTag |
|
| 86 |
+ name: web:latest |
|
| 87 |
+ postCommit: {}
|
|
| 88 |
+ resources: {}
|
|
| 89 |
+ source: |
|
| 90 |
+ contextDir: test/fixtures/app-scenarios/docker-compose/complex/app |
|
| 91 |
+ git: |
|
| 92 |
+ ref: libcompose |
|
| 93 |
+ uri: git@github.com:openshift/origin.git |
|
| 94 |
+ secrets: null |
|
| 95 |
+ type: Git |
|
| 96 |
+ strategy: |
|
| 97 |
+ dockerStrategy: |
|
| 98 |
+ from: |
|
| 99 |
+ kind: ImageStreamTag |
|
| 100 |
+ name: web:from |
|
| 101 |
+ type: Docker |
|
| 102 |
+ triggers: |
|
| 103 |
+ - github: |
|
| 104 |
+ secret: f7q0OGOVc-usCy7xA-Ul |
|
| 105 |
+ type: GitHub |
|
| 106 |
+ - generic: |
|
| 107 |
+ secret: yK_wyTqSxqqoYZ_TVSoo |
|
| 108 |
+ type: Generic |
|
| 109 |
+ - type: ConfigChange |
|
| 110 |
+ - imageChange: {}
|
|
| 111 |
+ type: ImageChange |
|
| 112 |
+ status: |
|
| 113 |
+ lastVersion: 0 |
|
| 114 |
+- apiVersion: v1 |
|
| 115 |
+ kind: ImageStream |
|
| 116 |
+ metadata: |
|
| 117 |
+ creationTimestamp: null |
|
| 118 |
+ name: db |
|
| 119 |
+ spec: |
|
| 120 |
+ tags: |
|
| 121 |
+ - annotations: |
|
| 122 |
+ openshift.io/imported-from: redis |
|
| 123 |
+ from: |
|
| 124 |
+ kind: DockerImage |
|
| 125 |
+ name: redis |
|
| 126 |
+ generation: null |
|
| 127 |
+ importPolicy: {}
|
|
| 128 |
+ name: latest |
|
| 129 |
+ status: |
|
| 130 |
+ dockerImageRepository: "" |
|
| 131 |
+- apiVersion: v1 |
|
| 132 |
+ kind: DeploymentConfig |
|
| 133 |
+ metadata: |
|
| 134 |
+ creationTimestamp: null |
|
| 135 |
+ name: db |
|
| 136 |
+ spec: |
|
| 137 |
+ replicas: 1 |
|
| 138 |
+ selector: |
|
| 139 |
+ deploymentconfig: db |
|
| 140 |
+ strategy: |
|
| 141 |
+ resources: {}
|
|
| 142 |
+ template: |
|
| 143 |
+ metadata: |
|
| 144 |
+ creationTimestamp: null |
|
| 145 |
+ labels: |
|
| 146 |
+ deploymentconfig: db |
|
| 147 |
+ spec: |
|
| 148 |
+ containers: |
|
| 149 |
+ - image: redis |
|
| 150 |
+ name: db |
|
| 151 |
+ ports: |
|
| 152 |
+ - containerPort: 6379 |
|
| 153 |
+ resources: |
|
| 154 |
+ limits: |
|
| 155 |
+ cpu: 100m |
|
| 156 |
+ memory: 1G |
|
| 157 |
+ test: false |
|
| 158 |
+ triggers: |
|
| 159 |
+ - type: ConfigChange |
|
| 160 |
+ - imageChangeParams: |
|
| 161 |
+ automatic: true |
|
| 162 |
+ containerNames: |
|
| 163 |
+ - db |
|
| 164 |
+ from: |
|
| 165 |
+ kind: ImageStreamTag |
|
| 166 |
+ name: db:latest |
|
| 167 |
+ type: ImageChange |
|
| 168 |
+ status: {}
|
|
| 169 |
+- apiVersion: v1 |
|
| 170 |
+ kind: DeploymentConfig |
|
| 171 |
+ metadata: |
|
| 172 |
+ creationTimestamp: null |
|
| 173 |
+ name: nginx |
|
| 174 |
+ spec: |
|
| 175 |
+ replicas: 1 |
|
| 176 |
+ selector: |
|
| 177 |
+ deploymentconfig: nginx |
|
| 178 |
+ strategy: |
|
| 179 |
+ resources: {}
|
|
| 180 |
+ template: |
|
| 181 |
+ metadata: |
|
| 182 |
+ creationTimestamp: null |
|
| 183 |
+ labels: |
|
| 184 |
+ deploymentconfig: nginx |
|
| 185 |
+ spec: |
|
| 186 |
+ containers: |
|
| 187 |
+ - image: nginx |
|
| 188 |
+ name: nginx |
|
| 189 |
+ ports: |
|
| 190 |
+ - containerPort: 80 |
|
| 191 |
+ resources: {}
|
|
| 192 |
+ volumeMounts: |
|
| 193 |
+ - mountPath: /www/public |
|
| 194 |
+ name: dir-1 |
|
| 195 |
+ - mountPath: /src/app |
|
| 196 |
+ name: dir-2 |
|
| 197 |
+ - args: |
|
| 198 |
+ - nodemon |
|
| 199 |
+ - -L |
|
| 200 |
+ - app/bin/www |
|
| 201 |
+ image: web |
|
| 202 |
+ name: web |
|
| 203 |
+ ports: |
|
| 204 |
+ - containerPort: 3000 |
|
| 205 |
+ resources: |
|
| 206 |
+ requests: |
|
| 207 |
+ cpu: 500m |
|
| 208 |
+ volumeMounts: |
|
| 209 |
+ - mountPath: /src/app |
|
| 210 |
+ name: dir-2 |
|
| 211 |
+ volumes: |
|
| 212 |
+ - emptyDir: {}
|
|
| 213 |
+ name: dir-1 |
|
| 214 |
+ - emptyDir: {}
|
|
| 215 |
+ name: dir-2 |
|
| 216 |
+ test: false |
|
| 217 |
+ triggers: |
|
| 218 |
+ - type: ConfigChange |
|
| 219 |
+ - imageChangeParams: |
|
| 220 |
+ automatic: true |
|
| 221 |
+ containerNames: |
|
| 222 |
+ - nginx |
|
| 223 |
+ from: |
|
| 224 |
+ kind: ImageStreamTag |
|
| 225 |
+ name: nginx:latest |
|
| 226 |
+ type: ImageChange |
|
| 227 |
+ - imageChangeParams: |
|
| 228 |
+ automatic: true |
|
| 229 |
+ containerNames: |
|
| 230 |
+ - web |
|
| 231 |
+ from: |
|
| 232 |
+ kind: ImageStreamTag |
|
| 233 |
+ name: web:latest |
|
| 234 |
+ type: ImageChange |
|
| 235 |
+ status: {}
|
|
| 236 |
+- apiVersion: v1 |
|
| 237 |
+ kind: Service |
|
| 238 |
+ metadata: |
|
| 239 |
+ creationTimestamp: null |
|
| 240 |
+ name: redis |
|
| 241 |
+ spec: |
|
| 242 |
+ ports: |
|
| 243 |
+ - name: 6379-tcp |
|
| 244 |
+ port: 6379 |
|
| 245 |
+ targetPort: 6379 |
|
| 246 |
+ selector: |
|
| 247 |
+ deploymentconfig: db |
|
| 248 |
+ status: |
|
| 249 |
+ loadBalancer: {}
|
|
| 250 |
+- apiVersion: v1 |
|
| 251 |
+ kind: Service |
|
| 252 |
+ metadata: |
|
| 253 |
+ creationTimestamp: null |
|
| 254 |
+ name: nginx |
|
| 255 |
+ spec: |
|
| 256 |
+ ports: |
|
| 257 |
+ - name: 80-tcp |
|
| 258 |
+ port: 80 |
|
| 259 |
+ targetPort: 80 |
|
| 260 |
+ - name: 3000-tcp |
|
| 261 |
+ port: 3000 |
|
| 262 |
+ targetPort: 3000 |
|
| 263 |
+ selector: |
|
| 264 |
+ deploymentconfig: nginx |
|
| 265 |
+ status: |
|
| 266 |
+ loadBalancer: {}
|
| 0 | 267 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,30 @@
+web:
+  build: ./app
+  volumes:
+    - "./app:/src/app"
+  ports:
+    - "3030:3000"
+  links:
+    - "db:redis"
+  command: nodemon -L app/bin/www
+  cpu_shares: 512
+
+nginx:
+  restart: always
+  build: ./nginx/
+  ports:
+    - "80:80"
+  volumes:
+    - /www/public
+  volumes_from:
+    - web
+  links:
+    - web:web
+
+db:
+  image: redis
+  ports:
+    - "6379:6379"
+  cpu_quota: 10000
+  mem_limit: 1000000000
+  cpuset: 1