// inc/check_status.go — helper binary (the compiled output, inc/check_status,
// is git-ignored) that reads a JSON list of agent checks from stdin, probes
// each agent's /status URL with bounded concurrency, and reports the checks
// that answered HTTP 200. main (below) prints those as JSON for
// inc/remote_status.php to consume.
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"time"
)

// result holds the outcome of a single status probe: the check's id, the
// HTTP status code observed (404 stands in when the request itself failed),
// and the transport error, if any.
type result struct {
	id     int
	status int
	err    error
}

// check describes one agent endpoint to probe. The JSON tags match the
// payload produced by inc/remote_status.php, which feeds this program
// over stdin and reads the filtered list back from stdout.
type check struct {
	Url          string `json:"url"`
	Id           int    `json:"id"`
	Computers_id int    `json:"computers_id"`
	Status       string `json:"status"`
}

// boundedParallelGet probes every URL in Checks concurrently, but with at
// most concurrencyLimit requests in flight at once. It returns only the
// checks that answered HTTP 200, with their Status set to "OK".
//
// A request that errors out (timeout, refused connection, malformed URL,
// ...) is treated exactly like a non-200 response and is dropped from the
// returned slice.
func boundedParallelGet(Checks []check, concurrencyLimit int) []check {

	// Buffered channel used as a counting semaphore: a send blocks once
	// concurrencyLimit tokens are outstanding.
	semaphoreChan := make(chan struct{}, concurrencyLimit)

	// Unbuffered channel carrying exactly one result per check.
	resultsChan := make(chan *result)

	// Close both channels once every result has been collected.
	defer func() {
		close(semaphoreChan)
		close(resultsChan)
	}()

	// Launch one goroutine per check. Each goroutine acquires a semaphore
	// token before doing any network work, so even though all goroutines
	// start immediately, at most concurrencyLimit requests run at once.
	for _, chck := range Checks {
		go func(chck check) {
			// Acquire a token (blocks while the limit is reached).
			semaphoreChan <- struct{}{}

			client := http.Client{
				Timeout: 7 * time.Second,
			}

			res, err := client.Get(chck.Url)

			// 404 stands in for "request failed" so the check is filtered
			// out below. BUGFIX: a successful response body must be closed,
			// otherwise every probe leaks a connection.
			status := 404
			if err == nil {
				status = res.StatusCode
				res.Body.Close()
			}

			resultsChan <- &result{chck.Id, status, err}

			// Release the token so another waiting goroutine may proceed.
			<-semaphoreChan
		}(chck)
	}

	// Collect exactly one result per check. BUGFIX: counting down (instead
	// of looping until len(results) == len(Checks) after each receive)
	// means an empty input returns immediately instead of blocking forever
	// on resultsChan.
	results := make([]result, 0, len(Checks))
	for i := 0; i < len(Checks); i++ {
		results = append(results, *<-resultsChan)
	}

	// Keep only the checks that answered 200 OK, looking each id up in the
	// package-level index built by init so the returned structs carry the
	// full check data.
	var ret []check
	for i := range results {
		// BUGFIX: guard against ids missing from the index (m is empty when
		// stdin held no checks) to avoid a nil-pointer dereference.
		if ch, ok := m[results[i].id]; ok && results[i].status == http.StatusOK {
			ch.Status = "OK"
			ret = append(ret, *ch)
		}
	}
	return ret
}

// Package-level state shared with main: the checks decoded from stdin and
// an id -> *check index used to map probe results back to full records.
var urls []string // retained for compatibility with the original file; unused here
var checks []check
var m = make(map[int]*check)

// init decodes the JSON check list from stdin and builds the id index.
// Decode problems are reported on stderr instead of being silently
// swallowed; the program then simply runs with an empty check list.
func init() {
	bytes, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		fmt.Fprintln(os.Stderr, "reading stdin:", err)
		return
	}
	if err := json.Unmarshal(bytes, &checks); err != nil {
		fmt.Fprintln(os.Stderr, "decoding checks:", err)
	}
	// Index by pointer so setting Status in boundedParallelGet updates the
	// same record that was decoded.
	for i := range checks {
		m[checks[i].Id] = &checks[i]
	}
}
+// 100 bounded parallel requests: 100/100 in 1.3769354
+
+// main probes every check read from stdin (see init) with up to 100
+// concurrent requests and prints the checks that answered 200 OK as
+// indented JSON on stdout, where inc/remote_status.php consumes it.
+// The commented-out lines below are the timing harness from the
+// benchmark this code was adapted from.
+func main() {
+ //startTime := time.Now()
+ results := boundedParallelGet(checks, 100)
+ //seconds := time.Since(startTime).Seconds()
+ //tmplate := "requests: %d/%d in %v"
+
+ // fmt.Printf(tmplate, len(results), len(checks), seconds)
+
+ val,_ := json.MarshalIndent(results, "", " ")
+ fmt.Print(string(val))
+}
+
diff --git a/inc/remote_status.php b/inc/remote_status.php
new file mode 100644
index 0000000..a2f3a12
--- /dev/null
+++ b/inc/remote_status.php
@@ -0,0 +1,118 @@
+.
+
+ NOTE(review): the opening "<?php" and the start of this license header
+ appear truncated in this patch (only "." survives above) — confirm
+ against the original file before applying.
+
+ ------------------------------------------------------------------------
+
+ @package FusionInventory
+ @author Walid Nouh
+ @co-author
+ @copyright Copyright (c) 2010-2011 FusionInventory team
+ @license AGPL License 3.0 or (at your option) any later version
+ http://www.gnu.org/licenses/agpl-3.0-standalone.html
+ @link http://www.fusioninventory.org/
+ @link http://forge.fusioninventory.org/projects/fusioninventory-for-glpi/
+ @since 2010
+
+ ------------------------------------------------------------------------
+ */
+
+include ("../../../inc/includes.php");
+
+// NOTE(review): nothing in this script registers a tick handler, so
+// declare(ticks=1) looks like a leftover — confirm it is still needed.
+declare(ticks=1);
+
+
+global $DB,$agents;
+
+ $check_arr = [];
+ $pfInventoryComputerComputer = new PluginFusioninventoryInventoryComputerComputer();
+ // Build one check entry per FusionInventory agent: the agent's HTTP
+ // status URL (port 62354) plus the ids needed to update the matching
+ // computer record after the probe.
+ foreach (getAllDataFromTable(PluginFusioninventoryAgent::getTable()) as $a) {
+
+ $check = [];
+ $a_computerextend = $pfInventoryComputerComputer->hasAutomaticInventory($a["computers_id"]);
+
+ $check["url"] = "http://".$a_computerextend["remote_addr"].":62354/status";
+ $check["id"] = $a["id"];
+ $check["computers_id"] = $a["computers_id"];
+ $check["status"] = "unknown";
+
+ $check_arr[] = $check;
+ //print_r($agent->getAgentStatusURLs());
+}
+
+
+$descriptorspec = array(
+ 0 => array("pipe", "r"), // stdin is a pipe that the child will read from
+ 1 => array("pipe", "w"), // stdout is a pipe that the child will write to
+ 2 => array("file", "/tmp/error-output.txt", "a") // stderr is a file to write to
+);
+
+$cwd = 
'/tmp';
+$env = array('debug' => 'false');
+
+// NOTE(review): .gitignore and the Go source in this same patch suggest the
+// compiled helper is inc/check_status, but this runs bench_urls — confirm
+// the binary name before shipping.
+$process = proc_open(__DIR__.'/bench_urls', $descriptorspec, $pipes, $cwd, $env);
+
+if (is_resource($process)) {
+ // $pipes now looks like this:
+ // 0 => writeable handle connected to child stdin
+ // 1 => readable handle connected to child stdout
+ // Any error output will be appended to /tmp/error-output.txt
+
+ // Feed the check list to the helper and read back the checks that
+ // answered 200 OK.
+ fwrite($pipes[0], json_encode($check_arr));
+ fclose($pipes[0]);
+
+ $checked = json_decode(stream_get_contents($pipes[1]));
+ fclose($pipes[1]);
+
+ // It is important that you close any pipes before calling
+ // proc_close in order to avoid a deadlock
+ $return_value = proc_close($process);
+
+ echo "command returned $return_value\n";
+}
+
+ // Reset states_id for every computer first ('1' => '1' is an
+ // always-true WHERE clause, i.e. an unconditional table-wide update);
+ // the loop below then re-marks the computers whose agent responded.
+ $DB->update("glpi_computers", [
+ 'states_id' => NULL ] ,
+ [ '1' => '1' ]
+ );
+
+
+// NOTE(review): $checked is only assigned inside the is_resource() branch
+// above; if proc_open() fails this foreach iterates over an undefined
+// variable — add a guard (e.g. is_iterable($checked)) before shipping.
+foreach ($checked as $s) {
+ echo $s->computers_id." ";
+
+ // Mark the responding computer with states_id = 2.
+ $comp = new Computer();
+ $comp->getFromDB($s->computers_id);
+ $comp->fields["states_id"] = 2;
+ $DB->update("glpi_computers", [
+ 'states_id' => $comp->fields["states_id"] ],
+ [ 'id' => $s->computers_id ]
+ );
+ echo $comp->fields["contact"]."\n";
+
+}
+
+// print_r($a_computerextend);
+exit(0);
diff --git a/inc/remotesupport.class.php b/inc/remotesupport.class.php
index d69d0c4..6b661bc 100644
--- a/inc/remotesupport.class.php
+++ b/inc/remotesupport.class.php
@@ -6,8 +6,7 @@ if (!defined('GLPI_ROOT')) {
-class PluginRemoteSupportComputer extends CommonDBTM {
-
+class PluginRemotesupportRemotesupport extends CommonDBTM {
 static function showInfo($item) {
 $fi_path = Plugin::getWebDir('fusioninventory');
@@ -28,7 +27,7 @@ class PluginRemoteSupportComputer extends CommonDBTM {
 echo '';
 echo '';
- $url .= "
  • " . $a_computerextend['remote_addr'] . "
  • ";
+ $url = "
  • " . $a_computerextend['remote_addr'] . "
  • ";
 if ($url != ""){
 echo "