aboutsummaryrefslogtreecommitdiffstats
path: root/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml
blob: 4f8ca005dce6d298b540a943fc44680466a13d2e (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
# Load OS-family-specific variables (RedHat.yml / Debian.yml) providing
# the `packages` list used by the install tasks below.
- name: Include OS family specific variables
  include_vars: "{{ ansible_os_family }}.yml"

# EPEL provides extra packages needed on CentOS/RHEL 7. Use the yum
# module (not `command: yum ...`) so the task is idempotent and only
# reports "changed" when the package is actually installed.
- name: Install yum epel-release
  yum:
    name: epel-release
    state: present
  when: ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7'

# Install all dependencies in a single yum transaction: the yum module
# accepts a list for `name`, which is faster than a `with_items` loop
# (one transaction per package) and avoids the deprecated loop-squashing
# behavior.
- name: Install yum packages
  yum:
    name: "{{ packages }}"
    state: present
  when: ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7'

# Install all dependencies in a single apt transaction: the apt module
# accepts a list for `name`, avoiding the deprecated per-item loop.
# `update_cache: true` (canonical boolean, not the quoted string 'yes')
# refreshes the package index before installing.
- name: Install apt packages
  apt:
    name: "{{ packages }}"
    state: present
    update_cache: true
  when: ansible_os_family == 'Debian'
s="kt">int ret; if (!is_io_mapping_possible(base, size)) return -EINVAL; ret = io_reserve_memtype(base, base + size, &pcm); if (ret) return ret; *prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm)); return 0; } EXPORT_SYMBOL_GPL(iomap_create_wc); void iomap_free(resource_size_t base, unsigned long size) { io_free_memtype(base, base + size); } EXPORT_SYMBOL_GPL(iomap_free); void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) { pte_t pte = pfn_pte(pfn, prot); unsigned long vaddr; int idx, type; preempt_disable(); pagefault_disable(); type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); WARN_ON(!pte_none(*(kmap_pte - idx))); #ifdef CONFIG_PREEMPT_RT_FULL current->kmap_pte[type] = pte; #endif set_pte(kmap_pte - idx, pte); arch_flush_lazy_mmu_mode(); return (void *)vaddr; } /* * Map 'pfn' using protections 'prot' */ void __iomem * iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) { /* * For non-PAT systems, translate non-WB request to UC- just in * case the caller set the PWT bit to prot directly without using * pgprot_writecombine(). UC- translates to uncached if the MTRR * is UC or WC. UC- gets the real intention, of the user, which is * "WC if the MTRR is WC, UC if you can't do that." 
*/ if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB) prot = __pgprot(__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)); return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot); } EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); void iounmap_atomic(void __iomem *kvaddr) { unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; if (vaddr >= __fix_to_virt(FIX_KMAP_END) && vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) { int idx, type; type = kmap_atomic_idx(); idx = type + KM_TYPE_NR * smp_processor_id(); #ifdef CONFIG_DEBUG_HIGHMEM WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); #endif /* * Force other mappings to Oops if they'll try to access this * pte without first remap it. Keeping stale mappings around * is a bad idea also, in case the page changes cacheability * attributes or becomes a protected page in a hypervisor. */ #ifdef CONFIG_PREEMPT_RT_FULL current->kmap_pte[type] = __pte(0); #endif kpte_clear_flush(kmap_pte-idx, vaddr); kmap_atomic_idx_pop(); } pagefault_enable(); preempt_enable(); } EXPORT_SYMBOL_GPL(iounmap_atomic);